diff --git ethereum/go-ethereum/accounts/abi/abi.go taikoxyz/taiko-geth/accounts/abi/abi.go
index 55576070a4ae1028db82f2d64a5f8ae3d8fafb2c..6e1075c715fddd2e727ca0f20c6f4adb46fd4b6c 100644
--- ethereum/go-ethereum/accounts/abi/abi.go
+++ taikoxyz/taiko-geth/accounts/abi/abi.go
@@ -22,6 +22,7 @@ "encoding/json"
"errors"
"fmt"
"io"
+ "math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@@ -246,24 +247,65 @@
// revertSelector is a special function selector for revert reason unpacking.
var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
+// panicSelector is a special function selector for panic reason unpacking.
+var panicSelector = crypto.Keccak256([]byte("Panic(uint256)"))[:4]
+
+// panicReasons map is for readable panic codes
+// see this link for the details
+// https://docs.soliditylang.org/en/v0.8.21/control-structures.html#panic-via-assert-and-error-via-require
+// the reason string list is copied from ethers.js
+// https://github.com/ethers-io/ethers.js/blob/fa3a883ff7c88611ce766f58bdd4b8ac90814470/src.ts/abi/interface.ts#L207-L218
+var panicReasons = map[uint64]string{
+ 0x00: "generic panic",
+ 0x01: "assert(false)",
+ 0x11: "arithmetic underflow or overflow",
+ 0x12: "division or modulo by zero",
+ 0x21: "enum overflow",
+ 0x22: "invalid encoded storage byte array accessed",
+ 0x31: "out-of-bounds array access; popping on an empty array",
+ 0x32: "out-of-bounds access of an array or bytesN",
+ 0x41: "out of memory",
+ 0x51: "uninitialized function",
+}
+
// UnpackRevert resolves the abi-encoded revert reason. According to the solidity
// spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert,
-// the provided revert reason is abi-encoded as if it were a call to a function
-// `Error(string)`. So it's a special tool for it.
+// the provided revert reason is abi-encoded as if it were a call to function
+// `Error(string)` or `Panic(uint256)`. So it's a special tool for it.
func UnpackRevert(data []byte) (string, error) {
if len(data) < 4 {
return "", errors.New("invalid data for unpacking")
}
- if !bytes.Equal(data[:4], revertSelector) {
+ switch {
+ case bytes.Equal(data[:4], revertSelector):
+ typ, err := NewType("string", "", nil)
+ if err != nil {
+ return "", err
+ }
+ unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:])
+ if err != nil {
+ return "", err
+ }
+ return unpacked[0].(string), nil
+ case bytes.Equal(data[:4], panicSelector):
+ typ, err := NewType("uint256", "", nil)
+ if err != nil {
+ return "", err
+ }
+ unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:])
+ if err != nil {
+ return "", err
+ }
+ pCode := unpacked[0].(*big.Int)
+ // The uint64 conversion is safe for now: all currently defined panic
+ // codes fit in uint64; the check guards against larger codes in future.
+ if pCode.IsUint64() {
+ if reason, ok := panicReasons[pCode.Uint64()]; ok {
+ return reason, nil
+ }
+ }
+ return fmt.Sprintf("unknown panic code: %#x", pCode), nil
+ default:
return "", errors.New("invalid data for unpacking")
}
- typ, err := NewType("string", "", nil)
- if err != nil {
- return "", err
- }
- unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:])
- if err != nil {
- return "", err
- }
- return unpacked[0].(string), nil
}
diff --git ethereum/go-ethereum/accounts/abi/abi_test.go taikoxyz/taiko-geth/accounts/abi/abi_test.go
index 3486ffd1a517b9772a1dba0f4e6a3689695f89dd..84175df4bb93e7d4ab532df545fbd02ddfd983fc 100644
--- ethereum/go-ethereum/accounts/abi/abi_test.go
+++ taikoxyz/taiko-geth/accounts/abi/abi_test.go
@@ -1173,6 +1173,8 @@ }{
{"", "", errors.New("invalid data for unpacking")},
{"08c379a1", "", errors.New("invalid data for unpacking")},
{"08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000", "revert reason", nil},
+ {"4e487b710000000000000000000000000000000000000000000000000000000000000000", "generic panic", nil},
+ {"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
}
for index, c := range cases {
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
diff --git ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go taikoxyz/taiko-geth/accounts/abi/bind/backends/simulated.go
index 83b91f7f8d59351f1056f3f35af6439ef71d9046..8549976480bc2096c29c42a347f0cfcbcd519093 100644
--- ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go
+++ taikoxyz/taiko-geth/accounts/abi/bind/backends/simulated.go
@@ -199,7 +199,6 @@ stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return nil, err
}
-
return stateDB.GetCode(contract), nil
}
@@ -212,7 +211,6 @@ stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return nil, err
}
-
return stateDB.GetBalance(contract), nil
}
@@ -225,7 +223,6 @@ stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return 0, err
}
-
return stateDB.GetNonce(contract), nil
}
@@ -238,7 +235,6 @@ stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return nil, err
}
-
val := stateDB.GetState(contract, key)
return val[:], nil
}
@@ -610,8 +606,7 @@ // Gas prices post 1559 need to be initialized
if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
- head := b.blockchain.CurrentHeader()
- if !b.blockchain.Config().IsLondon(head.Number) {
+ if !b.blockchain.Config().IsLondon(header.Number) {
// If there's no basefee, then it must be a non-1559 execution
if call.GasPrice == nil {
call.GasPrice = new(big.Int)
@@ -633,13 +628,13 @@ }
// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
call.GasPrice = new(big.Int)
if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 {
- call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap)
+ call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, header.BaseFee), call.GasFeeCap)
}
}
}
// Ensure message is initialized properly.
if call.Gas == 0 {
- call.Gas = 50000000
+ call.Gas = 10 * header.GasLimit
}
if call.Value == nil {
call.Value = new(big.Int)
@@ -700,8 +695,10 @@ block.AddTxWithChain(b.blockchain, tx)
}
block.AddTxWithChain(b.blockchain, tx)
})
- stateDB, _ := b.blockchain.State()
-
+ stateDB, err := b.blockchain.State()
+ if err != nil {
+ return err
+ }
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
b.pendingReceipts = receipts[0]
@@ -821,11 +818,12 @@
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
block.OffsetTime(int64(adjustment.Seconds()))
})
- stateDB, _ := b.blockchain.State()
-
+ stateDB, err := b.blockchain.State()
+ if err != nil {
+ return err
+ }
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
-
return nil
}
@@ -892,7 +890,7 @@ return rawdb.ReadReceipts(fb.db, hash, *number, header.Time, fb.bc.Config()), nil
}
func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
- logs := rawdb.ReadLogs(fb.db, hash, number, fb.bc.Config())
+ logs := rawdb.ReadLogs(fb.db, hash, number)
return logs, nil
}
diff --git ethereum/go-ethereum/accounts/abi/method.go taikoxyz/taiko-geth/accounts/abi/method.go
index f69e3ee9b562dd33959a03871bf6eae0e183fccc..b6e1eef3cf3f1730186b78f07ea5505e5da8792a 100644
--- ethereum/go-ethereum/accounts/abi/method.go
+++ taikoxyz/taiko-geth/accounts/abi/method.go
@@ -127,11 +127,12 @@ if state != "" {
state = state + " "
}
identity := fmt.Sprintf("function %v", rawName)
- if funType == Fallback {
+ switch funType {
+ case Fallback:
identity = "fallback"
- } else if funType == Receive {
+ case Receive:
identity = "receive"
- } else if funType == Constructor {
+ case Constructor:
identity = "constructor"
}
str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
diff --git ethereum/go-ethereum/accounts/abi/method_test.go taikoxyz/taiko-geth/accounts/abi/method_test.go
index 395a5289654aee77c826e2200ca7cc1cb1a74a48..9230e307aa41b09ad4cad9f7c116adc2cf5ee26b 100644
--- ethereum/go-ethereum/accounts/abi/method_test.go
+++ taikoxyz/taiko-geth/accounts/abi/method_test.go
@@ -84,11 +84,12 @@ }
for _, test := range table {
var got string
- if test.method == "fallback" {
+ switch test.method {
+ case "fallback":
got = abi.Fallback.String()
- } else if test.method == "receive" {
+ case "receive":
got = abi.Receive.String()
- } else {
+ default:
got = abi.Methods[test.method].String()
}
if got != test.expectation {
diff --git ethereum/go-ethereum/accounts/abi/unpack.go taikoxyz/taiko-geth/accounts/abi/unpack.go
index 68451483cdaa234f0b39a8a3e182fd1b6c594962..905b5ce629db5684495aa87da39c9cfdee6bc69c 100644
--- ethereum/go-ethereum/accounts/abi/unpack.go
+++ taikoxyz/taiko-geth/accounts/abi/unpack.go
@@ -160,13 +160,14 @@
// this value will become our slice or our array, depending on the type
var refSlice reflect.Value
- if t.T == SliceTy {
+ switch t.T {
+ case SliceTy:
// declare our slice
refSlice = reflect.MakeSlice(t.GetType(), size, size)
- } else if t.T == ArrayTy {
+ case ArrayTy:
// declare our array
refSlice = reflect.New(t.GetType()).Elem()
- } else {
+ default:
return nil, errors.New("abi: invalid type in array/slice unpacking stage")
}
diff --git ethereum/go-ethereum/accounts/keystore/watch.go taikoxyz/taiko-geth/accounts/keystore/watch.go
index 3f64b89c585e57135b71d1f4cfaddb79124b8ad2..a9f87e7c323e11ed47b603776f737d756400aae2 100644
--- ethereum/go-ethereum/accounts/keystore/watch.go
+++ taikoxyz/taiko-geth/accounts/keystore/watch.go
@@ -20,6 +20,7 @@
package keystore
import (
+ "os"
"time"
"github.com/ethereum/go-ethereum/log"
@@ -77,7 +78,9 @@ return
}
defer watcher.Close()
if err := watcher.Add(w.ac.keydir); err != nil {
- logger.Warn("Failed to watch keystore folder", "err", err)
+ if !os.IsNotExist(err) {
+ logger.Warn("Failed to watch keystore folder", "err", err)
+ }
return
}
diff --git ethereum/go-ethereum/accounts/scwallet/README.md taikoxyz/taiko-geth/accounts/scwallet/README.md
index 4313d9c6b2f8ecacf53657409572f53e482bbbe4..28079c47435cffb174136638572c225c19a8d840 100644
--- ethereum/go-ethereum/accounts/scwallet/README.md
+++ taikoxyz/taiko-geth/accounts/scwallet/README.md
@@ -8,7 +8,7 @@ * PCSCD version 4.3 running on your system **Only version 4.3 is currently supported**
## Preparing the smartcard
- **WARNING: FOILLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**
+ **WARNING: FOLLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**
You can use status' [keycard-cli](https://github.com/status-im/keycard-cli) and you should get _at least_ version 2.1.1 of their [smartcard application](https://github.com/status-im/status-keycard/releases/download/2.2.1/keycard_v2.2.1.cap)
diff --git ethereum/go-ethereum/build/checksums.txt taikoxyz/taiko-geth/build/checksums.txt
index 6e33f456d142c22d436df62c20ad83ed9c31c16b..c752fb8ccd64593d5dfbb66d4a4ee912d281f6bb 100644
--- ethereum/go-ethereum/build/checksums.txt
+++ taikoxyz/taiko-geth/build/checksums.txt
@@ -1,20 +1,30 @@
# This file contains sha256 checksums of optional build dependencies.
-2c5ee9c9ec1e733b0dbbc2bdfed3f62306e51d8172bf38f4f4e542b27520f597 go1.20.7.src.tar.gz
-785170eab380a8985d53896808b0a71336d0ea60e0a26099b4ccec77798b1cf4 go1.20.7.darwin-amd64.tar.gz
-eea1e7e4c2f75c72629050e6a6c7c46c446d64056732a7787fb3ba16ace1982e go1.20.7.darwin-arm64.tar.gz
-d8cff0357ac24eb06f3f280535397eeaacf95611d29f9b2abc3060f3d6dce3b4 go1.20.7.freebsd-386.tar.gz
-26918dcebf474a9e81ccf9f648cdf36968dfb76b481518cf615d78455dda4416 go1.20.7.freebsd-amd64.tar.gz
-ddb48145f05bda2f4617a22c979d4e94b22802cdb1a1fde1b1974e733b26f091 go1.20.7.linux-386.tar.gz
-f0a87f1bcae91c4b69f8dc2bc6d7e6bfcd7524fceec130af525058c0c17b1b44 go1.20.7.linux-amd64.tar.gz
-44781ae3b153c3b07651d93b6bc554e835a36e2d72a696281c1e4dad9efffe43 go1.20.7.linux-arm64.tar.gz
-7cc231b415b94f2f7065870a73f67dd2b0ec12b5a98052b7ee0121c42bc04f8d go1.20.7.linux-armv6l.tar.gz
-6318a1db307c12b8afe68808bd6fae4fba1e558a85b958216096869ed506dcb3 go1.20.7.linux-ppc64le.tar.gz
-26aea2ede8722ceecbd9db883328a8d963136fd96c11dacc356c44c4c19c6515 go1.20.7.linux-s390x.tar.gz
-5b0ef6f58d3e04d6cc003aa98e9172f41ba9e091b1c98e7339b41c4c87fb78a1 go1.20.7.windows-386.zip
-736dc6c7fcab1c96b682c8c93e38d7e371e62a17d34cb2c37d451a1147f66af9 go1.20.7.windows-amd64.zip
-fc6f79c1e1ed9e506c65f2112ac4e387479916f1accb0d046a6a19ff6938baa5 go1.20.7.windows-arm64.zip
+# version:spec-tests 1.0.5
+# https://github.com/ethereum/execution-spec-tests/releases
+# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.5/
+d4fd06a0e5f94beb970f3c68374b38ef9de82d4be77517d326bcf739c3cbf3a2 fixtures_develop.tar.gz
+# version:golang 1.21.3
+# https://go.dev/dl/
+186f2b6f8c8b704e696821b09ab2041a5c1ee13dcbc3156a13adcf75931ee488 go1.21.3.src.tar.gz
+27014fc69e301d7588a169ca239b3cc609f0aa1abf38528bf0d20d3b259211eb go1.21.3.darwin-amd64.tar.gz
+65302a7a9f7a4834932b3a7a14cb8be51beddda757b567a2f9e0cbd0d7b5a6ab go1.21.3.darwin-arm64.tar.gz
+8e0cd2f66cf1bde9d07b4aee01e3d7c3cfdd14e20650488e1683da4b8492594a go1.21.3.freebsd-386.tar.gz
+6e74f65f586e93d1f3947894766f69e9b2ebda488592a09df61f36f06bfe58a8 go1.21.3.freebsd-amd64.tar.gz
+fb209fd070db500a84291c5a95251cceeb1723e8f6142de9baca5af70a927c0e go1.21.3.linux-386.tar.gz
+1241381b2843fae5a9707eec1f8fb2ef94d827990582c7c7c32f5bdfbfd420c8 go1.21.3.linux-amd64.tar.gz
+fc90fa48ae97ba6368eecb914343590bbb61b388089510d0c56c2dde52987ef3 go1.21.3.linux-arm64.tar.gz
+a1ddcaaf0821a12a800884c14cb4268ce1c1f5a0301e9060646f1e15e611c6c7 go1.21.3.linux-armv6l.tar.gz
+3b0e10a3704f164a6e85e0377728ec5fd21524fabe4c925610e34076586d5826 go1.21.3.linux-ppc64le.tar.gz
+4c78e2e6f4c684a3d5a9bdc97202729053f44eb7be188206f0627ef3e18716b6 go1.21.3.linux-s390x.tar.gz
+e36737f4f2fadb4d2f919ec4ce517133a56e06064cca6e82fc883bb000c4d56c go1.21.3.windows-386.zip
+27c8daf157493f288d42a6f38debc6a2cb391f6543139eba9152fceca0be2a10 go1.21.3.windows-amd64.zip
+bfb7a5c56f9ded07d8ae0e0b3702ac07b65e68fa8f33da24ed6df4ce01fe2c5c go1.21.3.windows-arm64.zip
+
+# version:golangci 1.51.1
+# https://github.com/golangci/golangci-lint/releases/
+# https://github.com/golangci/golangci-lint/releases/download/v1.51.1/
fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
e06b3459aaed356e1667580be00b05f41f3b2e29685d12cdee571c23e1edb414 golangci-lint-1.51.1-freebsd-386.tar.gz
@@ -43,4 +53,12 @@ bce02f7232723cb727755ee11f168a700a00896a25d37f87c4b173bce55596b4 golangci-lint-1.51.1-windows-armv6.zip
cf6403f84707ce8c98664736772271bc8874f2e760c2fd0f00cf3e85963507e9 golangci-lint-1.51.1-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
+#
+# This version is fine to be old and full of security holes, we just use it
+# to build the latest Go. Don't change it. If it ever becomes insufficient,
+# we need to switch over to a recursive builder to jump across supported
+# versions.
+#
+# version:ppa-builder 1.19.6
+# https://go.dev/dl/
d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz
diff --git ethereum/go-ethereum/build/ci.go taikoxyz/taiko-geth/build/ci.go
index 242f4d841e5fa57787b7af545e1b8eac14e17704..1ff3fb5bf8261ed961dba7ff2a90c02e28c7c468 100644
--- ethereum/go-ethereum/build/ci.go
+++ taikoxyz/taiko-geth/build/ci.go
@@ -120,15 +120,15 @@
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
- // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish
+ // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsute, impish,
+ // kinetic
debDistroGoBoots = map[string]string{
- "trusty": "golang-1.11", // EOL: 04/2024
- "xenial": "golang-go", // EOL: 04/2026
- "bionic": "golang-go", // EOL: 04/2028
- "focal": "golang-go", // EOL: 04/2030
- "jammy": "golang-go", // EOL: 04/2032
- "kinetic": "golang-go", // EOL: 07/2023
- "lunar": "golang-go", // EOL: 01/2024
+ "trusty": "golang-1.11", // EOL: 04/2024
+ "xenial": "golang-go", // EOL: 04/2026
+ "bionic": "golang-go", // EOL: 04/2028
+ "focal": "golang-go", // EOL: 04/2030
+ "jammy": "golang-go", // EOL: 04/2032
+ "lunar": "golang-go", // EOL: 01/2024
}
debGoBootPaths = map[string]string{
@@ -136,18 +136,8 @@ "golang-1.11": "/usr/lib/go-1.11",
"golang-go": "/usr/lib/go",
}
- // This is the version of Go that will be downloaded by
- //
- // go run ci.go install -dlgo
- dlgoVersion = "1.20.7"
-
- // This is the version of Go that will be used to bootstrap the PPA builder.
- //
- // This version is fine to be old and full of security holes, we just use it
- // to build the latest Go. Don't change it. If it ever becomes insufficient,
- // we need to switch over to a recursive builder to jumpt across supported
- // versions.
- gobootVersion = "1.19.6"
+ // This is where the tests should be unpacked.
+ executionSpecTestsDir = "tests/spec-tests"
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -185,6 +175,8 @@ case "nsis":
doWindowsInstaller(os.Args[2:])
case "purge":
doPurge(os.Args[2:])
+ case "sanitycheck":
+ doSanityCheck()
default:
log.Fatal("unknown command ", os.Args[1])
}
@@ -206,9 +198,8 @@ // Configure the toolchain.
tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
if *dlgo {
csdb := build.MustLoadChecksums("build/checksums.txt")
- tc.Root = build.DownloadGo(csdb, dlgoVersion)
+ tc.Root = build.DownloadGo(csdb)
}
-
// Disable CLI markdown doc generation in release builds.
buildTags := []string{"urfave_cli_no_docs"}
@@ -294,14 +285,18 @@ cc = flag.String("cc", "", "Sets C compiler binary")
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
verbose = flag.Bool("v", false, "Whether to log verbosely")
race = flag.Bool("race", false, "Execute the race detector")
+ cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads")
)
flag.CommandLine.Parse(cmdline)
+
+ // Get test fixtures.
+ csdb := build.MustLoadChecksums("build/checksums.txt")
+ downloadSpecTestFixtures(csdb, *cachedir)
// Configure the toolchain.
tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
if *dlgo {
- csdb := build.MustLoadChecksums("build/checksums.txt")
- tc.Root = build.DownloadGo(csdb, dlgoVersion)
+ tc.Root = build.DownloadGo(csdb)
}
gotest := tc.Go("test")
@@ -332,6 +327,25 @@ gotest.Args = append(gotest.Args, packages...)
build.MustRun(gotest)
}
+// downloadSpecTestFixtures downloads and extracts the execution-spec-tests fixtures.
+func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
+ executionSpecTestsVersion, err := build.Version(csdb, "spec-tests")
+ if err != nil {
+ log.Fatal(err)
+ }
+ ext := ".tar.gz"
+ base := "fixtures_develop" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename
+ url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/v%s/%s%s", executionSpecTestsVersion, base, ext)
+ archivePath := filepath.Join(cachedir, base+ext)
+ if err := csdb.DownloadFile(url, archivePath); err != nil {
+ log.Fatal(err)
+ }
+ if err := build.ExtractArchive(archivePath, executionSpecTestsDir); err != nil {
+ log.Fatal(err)
+ }
+ return filepath.Join(cachedir, base)
+}
+
// doLint runs golangci-lint on requested packages.
func doLint(cmdline []string) {
var (
@@ -351,9 +365,11 @@ }
// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
- const version = "1.51.1"
-
csdb := build.MustLoadChecksums("build/checksums.txt")
+ version, err := build.Version(csdb, "golangci")
+ if err != nil {
+ log.Fatal(err)
+ }
arch := runtime.GOARCH
ext := ".tar.gz"
@@ -735,6 +751,10 @@ // downloadGoBootstrapSources downloads the Go source tarball that will be used
// to bootstrap the builder Go.
func downloadGoBootstrapSources(cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt")
+ gobootVersion, err := build.Version(csdb, "ppa-builder")
+ if err != nil {
+ log.Fatal(err)
+ }
file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
url := "https://dl.google.com/go/" + file
dst := filepath.Join(cachedir, file)
@@ -747,6 +767,10 @@
// downloadGoSources downloads the Go source tarball.
func downloadGoSources(cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt")
+ dlgoVersion, err := build.Version(csdb, "golang")
+ if err != nil {
+ log.Fatal(err)
+ }
file := fmt.Sprintf("go%s.src.tar.gz", dlgoVersion)
url := "https://dl.google.com/go/" + file
dst := filepath.Join(cachedir, file)
@@ -1073,3 +1097,7 @@ if err := build.AzureBlobstoreDelete(auth, blobs); err != nil {
log.Fatal(err)
}
}
+
+func doSanityCheck() {
+ build.DownloadAndVerifyChecksums(build.MustLoadChecksums("build/checksums.txt"))
+}
diff --git ethereum/go-ethereum/cmd/devp2p/internal/ethtest/chain.go taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/chain.go
index 5e3d9fe98daf50e0abdb1d34ffcdb08987813325..938159ec524d613a8e7f3ce4250bfe80246a2b5b 100644
--- ethereum/go-ethereum/cmd/devp2p/internal/ethtest/chain.go
+++ taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/chain.go
@@ -77,7 +77,7 @@ }
// ForkID gets the fork id of the chain.
func (c *Chain) ForkID() forkid.ID {
- return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time())
+ return forkid.NewID(c.chainConfig, c.blocks[0], uint64(c.Len()), c.blocks[0].Time())
}
// Shorten returns a copy chain of a desired height from the imported
diff --git ethereum/go-ethereum/cmd/devp2p/internal/ethtest/chain_test.go taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/chain_test.go
index 67221923a684441973f2926301e570871136f542..de6acfdcda676c4b1d16b87d889cdfac3665b024 100644
--- ethereum/go-ethereum/cmd/devp2p/internal/ethtest/chain_test.go
+++ taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/chain_test.go
@@ -145,7 +145,7 @@ expected []*types.Header
}{
{
req: GetBlockHeaders{
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Number: uint64(2)},
Amount: uint64(5),
Skip: 1,
@@ -162,7 +162,7 @@ },
},
{
req: GetBlockHeaders{
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
Amount: uint64(3),
Skip: 0,
@@ -177,7 +177,7 @@ },
},
{
req: GetBlockHeaders{
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
Amount: uint64(1),
Skip: 0,
diff --git ethereum/go-ethereum/cmd/devp2p/internal/ethtest/helpers.go taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/helpers.go
index bc901bdeb01392410dc99c824023f4be80318279..a0339b88cbe4ebcc161fc94b8e2bb889e2e80f19 100644
--- ethereum/go-ethereum/cmd/devp2p/internal/ethtest/helpers.go
+++ taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/helpers.go
@@ -62,7 +62,6 @@ return nil, err
}
// set default p2p capabilities
conn.caps = []p2p.Cap{
- {Name: "eth", Version: 66},
{Name: "eth", Version: 67},
{Name: "eth", Version: 68},
}
@@ -237,8 +236,8 @@ if err != nil {
return errorf("could not get headers for inbound header request: %v", err)
}
resp := &BlockHeaders{
- RequestId: msg.ReqID(),
- BlockHeadersPacket: eth.BlockHeadersPacket(headers),
+ RequestId: msg.ReqID(),
+ BlockHeadersRequest: eth.BlockHeadersRequest(headers),
}
if err := c.Write(resp); err != nil {
return errorf("could not write to connection: %v", err)
@@ -267,7 +266,7 @@ resp, ok := msg.(*BlockHeaders)
if !ok {
return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
}
- headers := []*types.Header(resp.BlockHeadersPacket)
+ headers := []*types.Header(resp.BlockHeadersRequest)
return headers, nil
}
@@ -379,7 +378,7 @@ defer conn.SetReadDeadline(time.Time{})
conn.SetReadDeadline(time.Now().Add(20 * time.Second))
// create request
req := &GetBlockHeaders{
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Hash: block.Hash()},
Amount: 1,
},
@@ -604,8 +603,8 @@ pretty.Sdump(announcement),
pretty.Sdump(blockHeaderReq))
}
err = sendConn.Write(&BlockHeaders{
- RequestId: blockHeaderReq.ReqID(),
- BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()},
+ RequestId: blockHeaderReq.ReqID(),
+ BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()},
})
if err != nil {
return fmt.Errorf("failed to write to connection: %v", err)
diff --git ethereum/go-ethereum/cmd/devp2p/internal/ethtest/snap.go taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/snap.go
index f947e4bc9bae791bc1a0058fcadf7fae094632b8..54eb63f3deb2f1dcb16f5b70582473c832852e8e 100644
--- ethereum/go-ethereum/cmd/devp2p/internal/ethtest/snap.go
+++ taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/snap.go
@@ -27,8 +27,8 @@ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/internal/utesting"
- "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -58,7 +58,7 @@ // TestSnapGetAccountRange various forms of GetAccountRange requests.
func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
var (
root = s.chain.RootAt(999)
- ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ ffHash = common.MaxHash
zero = common.Hash{}
firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
@@ -125,7 +125,7 @@
// TestSnapGetStorageRanges various forms of GetStorageRanges requests.
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
var (
- ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ ffHash = common.MaxHash
zero = common.Hash{}
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
@@ -530,17 +530,13 @@ keys := make([][]byte, len(hashes))
for i, key := range hashes {
keys[i] = common.CopyBytes(key[:])
}
- nodes := make(light.NodeList, len(proof))
+ nodes := make(trienode.ProofList, len(proof))
for i, node := range proof {
nodes[i] = node
}
- proofdb := nodes.NodeSet()
+ proofdb := nodes.Set()
- var end []byte
- if len(keys) > 0 {
- end = keys[len(keys)-1]
- }
- _, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
+ _, err = trie.VerifyRangeProof(tc.root, tc.origin[:], keys, accounts, proofdb)
return err
}
diff --git ethereum/go-ethereum/cmd/devp2p/internal/ethtest/suite.go taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/suite.go
index 815353be72650c6aa871f80db76536eb60507ad6..0b56c8cf4b6f7be5fd6fec788c85705aefe04c8f 100644
--- ethereum/go-ethereum/cmd/devp2p/internal/ethtest/suite.go
+++ taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/suite.go
@@ -112,7 +112,7 @@ t.Fatalf("peering failed: %v", err)
}
// write request
req := &GetBlockHeaders{
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()},
Amount: 2,
Skip: 1,
@@ -150,7 +150,7 @@
// create two requests
req1 := &GetBlockHeaders{
RequestId: uint64(111),
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Hash: s.chain.blocks[1].Hash(),
},
@@ -161,7 +161,7 @@ },
}
req2 := &GetBlockHeaders{
RequestId: uint64(222),
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Hash: s.chain.blocks[1].Hash(),
},
@@ -201,10 +201,10 @@ expected2, err := s.chain.GetHeaders(req2)
if err != nil {
t.Fatalf("failed to get expected headers for request 2: %v", err)
}
- if !headersMatch(expected1, headers1.BlockHeadersPacket) {
+ if !headersMatch(expected1, headers1.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
}
- if !headersMatch(expected2, headers2.BlockHeadersPacket) {
+ if !headersMatch(expected2, headers2.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
}
}
@@ -224,7 +224,7 @@ // create requests
reqID := uint64(1234)
request1 := &GetBlockHeaders{
RequestId: reqID,
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Number: 1,
},
@@ -233,7 +233,7 @@ },
}
request2 := &GetBlockHeaders{
RequestId: reqID,
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{
Number: 33,
},
@@ -270,10 +270,10 @@ expected2, err := s.chain.GetHeaders(request2)
if err != nil {
t.Fatalf("failed to get expected block headers: %v", err)
}
- if !headersMatch(expected1, headers1.BlockHeadersPacket) {
+ if !headersMatch(expected1, headers1.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
}
- if !headersMatch(expected2, headers2.BlockHeadersPacket) {
+ if !headersMatch(expected2, headers2.BlockHeadersRequest) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
}
}
@@ -290,7 +290,7 @@ if err := conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err)
}
req := &GetBlockHeaders{
- GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
+ GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{Number: 0},
Amount: 2,
},
@@ -322,7 +322,7 @@ }
// create block bodies request
req := &GetBlockBodies{
RequestId: uint64(55),
- GetBlockBodiesPacket: eth.GetBlockBodiesPacket{
+ GetBlockBodiesRequest: eth.GetBlockBodiesRequest{
s.chain.blocks[54].Hash(),
s.chain.blocks[75].Hash(),
},
@@ -336,11 +336,11 @@ resp, ok := msg.(*BlockBodies)
if !ok {
t.Fatalf("unexpected: %s", pretty.Sdump(msg))
}
- bodies := resp.BlockBodiesPacket
+ bodies := resp.BlockBodiesResponse
t.Logf("received %d block bodies", len(bodies))
- if len(bodies) != len(req.GetBlockBodiesPacket) {
+ if len(bodies) != len(req.GetBlockBodiesRequest) {
t.Fatalf("wrong bodies in response: expected %d bodies, "+
- "got %d", len(req.GetBlockBodiesPacket), len(bodies))
+ "got %d", len(req.GetBlockBodiesRequest), len(bodies))
}
}
@@ -481,8 +481,8 @@ for _, hash := range hashMap {
hashes = append(hashes, hash)
}
getTxReq := &GetPooledTransactions{
- RequestId: 1234,
- GetPooledTransactionsPacket: hashes,
+ RequestId: 1234,
+ GetPooledTransactionsRequest: hashes,
}
if err = conn.Write(getTxReq); err != nil {
t.Fatalf("could not write to conn: %v", err)
@@ -490,7 +490,7 @@ }
// check that all received transactions match those that were sent to node
switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
case *PooledTransactions:
- for _, gotTx := range msg.PooledTransactionsPacket {
+ for _, gotTx := range msg.PooledTransactionsResponse {
if _, exists := hashMap[gotTx.Hash()]; !exists {
t.Fatalf("unexpected tx received: %v", gotTx.Hash())
}
@@ -547,8 +547,8 @@ for {
msg := conn.readAndServe(s.chain, timeout)
switch msg := msg.(type) {
case *GetPooledTransactions:
- if len(msg.GetPooledTransactionsPacket) != len(hashes) {
- t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket))
+ if len(msg.GetPooledTransactionsRequest) != len(hashes) {
+ t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
}
return
diff --git ethereum/go-ethereum/cmd/devp2p/internal/ethtest/suite_test.go taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/suite_test.go
index c5bcc3db1da494d03dc8ad458aa6afa88847fae4..7890c3134811accd6a78ec1554638e8a97e74780 100644
--- ethereum/go-ethereum/cmd/devp2p/internal/ethtest/suite_test.go
+++ taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/suite_test.go
@@ -120,6 +120,7 @@ })
if err != nil {
return err
}
+ backend.SetSynced()
_, err = backend.BlockChain().InsertChain(chain.blocks[1:])
return err
diff --git ethereum/go-ethereum/cmd/devp2p/internal/ethtest/types.go taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/types.go
index afa9a9c8c68586bfc23685fa7b67bfc42b0578f6..805d7a81b99af47e7fdc8d2ad09c688fdac953da 100644
--- ethereum/go-ethereum/cmd/devp2p/internal/ethtest/types.go
+++ taikoxyz/taiko-geth/cmd/devp2p/internal/ethtest/types.go
@@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 }
func (msg Transactions) ReqID() uint64 { return 18 }
// GetBlockHeaders represents a block header query.
-type GetBlockHeaders eth.GetBlockHeadersPacket66
+type GetBlockHeaders eth.GetBlockHeadersPacket
func (msg GetBlockHeaders) Code() int { return 19 }
func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId }
-type BlockHeaders eth.BlockHeadersPacket66
+type BlockHeaders eth.BlockHeadersPacket
func (msg BlockHeaders) Code() int { return 20 }
func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId }
// GetBlockBodies represents a GetBlockBodies request
-type GetBlockBodies eth.GetBlockBodiesPacket66
+type GetBlockBodies eth.GetBlockBodiesPacket
func (msg GetBlockBodies) Code() int { return 21 }
func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId }
// BlockBodies is the network packet for block content distribution.
-type BlockBodies eth.BlockBodiesPacket66
+type BlockBodies eth.BlockBodiesPacket
func (msg BlockBodies) Code() int { return 22 }
func (msg BlockBodies) ReqID() uint64 { return msg.RequestId }
@@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 }
func (msg NewBlock) ReqID() uint64 { return 0 }
// NewPooledTransactionHashes66 is the network packet for the tx hash propagation message.
-type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66
+type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67
func (msg NewPooledTransactionHashes66) Code() int { return 24 }
func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 }
@@ -139,12 +139,12 @@
func (msg NewPooledTransactionHashes) Code() int { return 24 }
func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }
-type GetPooledTransactions eth.GetPooledTransactionsPacket66
+type GetPooledTransactions eth.GetPooledTransactionsPacket
func (msg GetPooledTransactions) Code() int { return 25 }
func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId }
-type PooledTransactions eth.PooledTransactionsPacket66
+type PooledTransactions eth.PooledTransactionsPacket
func (msg PooledTransactions) Code() int { return 26 }
func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId }
@@ -180,25 +180,25 @@ msg = new(Disconnect)
case (Status{}).Code():
msg = new(Status)
case (GetBlockHeaders{}).Code():
- ethMsg := new(eth.GetBlockHeadersPacket66)
+ ethMsg := new(eth.GetBlockHeadersPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
return (*GetBlockHeaders)(ethMsg)
case (BlockHeaders{}).Code():
- ethMsg := new(eth.BlockHeadersPacket66)
+ ethMsg := new(eth.BlockHeadersPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
return (*BlockHeaders)(ethMsg)
case (GetBlockBodies{}).Code():
- ethMsg := new(eth.GetBlockBodiesPacket66)
+ ethMsg := new(eth.GetBlockBodiesPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
return (*GetBlockBodies)(ethMsg)
case (BlockBodies{}).Code():
- ethMsg := new(eth.BlockBodiesPacket66)
+ ethMsg := new(eth.BlockBodiesPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
@@ -217,13 +217,13 @@ return ethMsg
}
msg = new(NewPooledTransactionHashes66)
case (GetPooledTransactions{}.Code()):
- ethMsg := new(eth.GetPooledTransactionsPacket66)
+ ethMsg := new(eth.GetPooledTransactionsPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
return (*GetPooledTransactions)(ethMsg)
case (PooledTransactions{}.Code()):
- ethMsg := new(eth.PooledTransactionsPacket66)
+ ethMsg := new(eth.PooledTransactionsPacket)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
diff --git ethereum/go-ethereum/cmd/evm/internal/t8ntool/block.go taikoxyz/taiko-geth/cmd/evm/internal/t8ntool/block.go
index 09dca8984e9c89c8132fd6e1b6caf00b3f7600d2..5c0e28e284c0c3090eb0f971310707456c1cf1b2 100644
--- ethereum/go-ethereum/cmd/evm/internal/t8ntool/block.go
+++ taikoxyz/taiko-geth/cmd/evm/internal/t8ntool/block.go
@@ -37,33 +37,38 @@ )
//go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go
type header struct {
- ParentHash common.Hash `json:"parentHash"`
- OmmerHash *common.Hash `json:"sha3Uncles"`
- Coinbase *common.Address `json:"miner"`
- Root common.Hash `json:"stateRoot" gencodec:"required"`
- TxHash *common.Hash `json:"transactionsRoot"`
- ReceiptHash *common.Hash `json:"receiptsRoot"`
- Bloom types.Bloom `json:"logsBloom"`
- Difficulty *big.Int `json:"difficulty"`
- Number *big.Int `json:"number" gencodec:"required"`
- GasLimit uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed uint64 `json:"gasUsed"`
- Time uint64 `json:"timestamp" gencodec:"required"`
- Extra []byte `json:"extraData"`
- MixDigest common.Hash `json:"mixHash"`
- Nonce *types.BlockNonce `json:"nonce"`
- BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
- WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+ ParentHash common.Hash `json:"parentHash"`
+ OmmerHash *common.Hash `json:"sha3Uncles"`
+ Coinbase *common.Address `json:"miner"`
+ Root common.Hash `json:"stateRoot" gencodec:"required"`
+ TxHash *common.Hash `json:"transactionsRoot"`
+ ReceiptHash *common.Hash `json:"receiptsRoot"`
+ Bloom types.Bloom `json:"logsBloom"`
+ Difficulty *big.Int `json:"difficulty"`
+ Number *big.Int `json:"number" gencodec:"required"`
+ GasLimit uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed uint64 `json:"gasUsed"`
+ Time uint64 `json:"timestamp" gencodec:"required"`
+ Extra []byte `json:"extraData"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce *types.BlockNonce `json:"nonce"`
+ BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
+ WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+ BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
+ ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
+ ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
}
type headerMarshaling struct {
- Difficulty *math.HexOrDecimal256
- Number *math.HexOrDecimal256
- GasLimit math.HexOrDecimal64
- GasUsed math.HexOrDecimal64
- Time math.HexOrDecimal64
- Extra hexutil.Bytes
- BaseFee *math.HexOrDecimal256
+ Difficulty *math.HexOrDecimal256
+ Number *math.HexOrDecimal256
+ GasLimit math.HexOrDecimal64
+ GasUsed math.HexOrDecimal64
+ Time math.HexOrDecimal64
+ Extra hexutil.Bytes
+ BaseFee *math.HexOrDecimal256
+ BlobGasUsed *math.HexOrDecimal64
+ ExcessBlobGas *math.HexOrDecimal64
}
type bbInput struct {
@@ -113,22 +118,25 @@
// ToBlock converts i into a *types.Block
func (i *bbInput) ToBlock() *types.Block {
header := &types.Header{
- ParentHash: i.Header.ParentHash,
- UncleHash: types.EmptyUncleHash,
- Coinbase: common.Address{},
- Root: i.Header.Root,
- TxHash: types.EmptyTxsHash,
- ReceiptHash: types.EmptyReceiptsHash,
- Bloom: i.Header.Bloom,
- Difficulty: common.Big0,
- Number: i.Header.Number,
- GasLimit: i.Header.GasLimit,
- GasUsed: i.Header.GasUsed,
- Time: i.Header.Time,
- Extra: i.Header.Extra,
- MixDigest: i.Header.MixDigest,
- BaseFee: i.Header.BaseFee,
- WithdrawalsHash: i.Header.WithdrawalsHash,
+ ParentHash: i.Header.ParentHash,
+ UncleHash: types.EmptyUncleHash,
+ Coinbase: common.Address{},
+ Root: i.Header.Root,
+ TxHash: types.EmptyTxsHash,
+ ReceiptHash: types.EmptyReceiptsHash,
+ Bloom: i.Header.Bloom,
+ Difficulty: common.Big0,
+ Number: i.Header.Number,
+ GasLimit: i.Header.GasLimit,
+ GasUsed: i.Header.GasUsed,
+ Time: i.Header.Time,
+ Extra: i.Header.Extra,
+ MixDigest: i.Header.MixDigest,
+ BaseFee: i.Header.BaseFee,
+ WithdrawalsHash: i.Header.WithdrawalsHash,
+ BlobGasUsed: i.Header.BlobGasUsed,
+ ExcessBlobGas: i.Header.ExcessBlobGas,
+ ParentBeaconRoot: i.Header.ParentBeaconBlockRoot,
}
// Fill optional values.
@@ -150,7 +158,7 @@ }
if i.Header.Nonce != nil {
header.Nonce = *i.Header.Nonce
}
- if header.Difficulty != nil {
+ if i.Header.Difficulty != nil {
header.Difficulty = i.Header.Difficulty
}
return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals)
diff --git ethereum/go-ethereum/cmd/evm/internal/t8ntool/execution.go taikoxyz/taiko-geth/cmd/evm/internal/t8ntool/execution.go
index c408623fe32f4c9b217cd4a9be29fa3955e3eaf8..312f427d4c6b98b4b86edef7ac23e4af2598be4b 100644
--- ethereum/go-ethereum/cmd/evm/internal/t8ntool/execution.go
+++ taikoxyz/taiko-geth/cmd/evm/internal/t8ntool/execution.go
@@ -59,7 +59,7 @@ GasUsed math.HexOrDecimal64 `json:"gasUsed"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
- CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"`
+ CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
}
type ommer struct {
@@ -69,25 +69,26 @@ }
//go:generate go run github.com/fjl/gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
type stEnv struct {
- Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
- Difficulty *big.Int `json:"currentDifficulty"`
- Random *big.Int `json:"currentRandom"`
- ParentDifficulty *big.Int `json:"parentDifficulty"`
- ParentBaseFee *big.Int `json:"parentBaseFee,omitempty"`
- ParentGasUsed uint64 `json:"parentGasUsed,omitempty"`
- ParentGasLimit uint64 `json:"parentGasLimit,omitempty"`
- GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
- Number uint64 `json:"currentNumber" gencodec:"required"`
- Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
- ParentTimestamp uint64 `json:"parentTimestamp,omitempty"`
- BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
- Ommers []ommer `json:"ommers,omitempty"`
- Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
- BaseFee *big.Int `json:"currentBaseFee,omitempty"`
- ParentUncleHash common.Hash `json:"parentUncleHash"`
- ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"`
- ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
- ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
+ Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *big.Int `json:"currentDifficulty"`
+ Random *big.Int `json:"currentRandom"`
+ ParentDifficulty *big.Int `json:"parentDifficulty"`
+ ParentBaseFee *big.Int `json:"parentBaseFee,omitempty"`
+ ParentGasUsed uint64 `json:"parentGasUsed,omitempty"`
+ ParentGasLimit uint64 `json:"parentGasLimit,omitempty"`
+ GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
+ Number uint64 `json:"currentNumber" gencodec:"required"`
+ Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
+ ParentTimestamp uint64 `json:"parentTimestamp,omitempty"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+ Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
+ BaseFee *big.Int `json:"currentBaseFee,omitempty"`
+ ParentUncleHash common.Hash `json:"parentUncleHash"`
+ ExcessBlobGas *uint64 `json:"currentExcessBlobGas,omitempty"`
+ ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
+ ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
+ ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
}
type stEnvMarshaling struct {
@@ -162,17 +163,19 @@ if pre.Env.Random != nil {
rnd := common.BigToHash(pre.Env.Random)
vmContext.Random = &rnd
}
- // If excessBlobGas is defined, add it to the vmContext.
+ // Calculate the BlobBaseFee
+ var excessBlobGas uint64
if pre.Env.ExcessBlobGas != nil {
- vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas
+ excessBlobGas := *pre.Env.ExcessBlobGas
+ vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
} else {
// If it is not explicitly defined, but we have the parent values, we try
// to calculate it ourselves.
parentExcessBlobGas := pre.Env.ParentExcessBlobGas
parentBlobGasUsed := pre.Env.ParentBlobGasUsed
if parentExcessBlobGas != nil && parentBlobGasUsed != nil {
- excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
- vmContext.ExcessBlobGas = &excessBlobGas
+ excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
+ vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
}
}
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
@@ -182,13 +185,20 @@ chainConfig.DAOForkBlock != nil &&
chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 {
misc.ApplyDAOHardFork(statedb)
}
+ if beaconRoot := pre.Env.ParentBeaconBlockRoot; beaconRoot != nil {
+ evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vmConfig)
+ core.ProcessBeaconBlockRoot(*beaconRoot, evm, statedb)
+ }
var blobGasUsed uint64
for i, tx := range txs {
- if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil {
+ if tx.Type() == types.BlobTxType && vmContext.BlobBaseFee == nil {
errMsg := "blob tx used but field env.ExcessBlobGas missing"
log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg)
rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg})
continue
+ }
+ if tx.Type() == types.BlobTxType {
+ blobGasUsed += uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes()))
}
msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee)
if err != nil {
@@ -218,9 +228,6 @@ log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From, "error", err)
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
gaspool.SetGas(prevGas)
continue
- }
- if tx.Type() == types.BlobTxType {
- blobGasUsed += params.BlobTxBlobGasPerBlob
}
includedTxs = append(includedTxs, tx)
if hashError != nil {
@@ -317,8 +324,8 @@ if pre.Env.Withdrawals != nil {
h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil))
execRs.WithdrawalsRoot = &h
}
- if vmContext.ExcessBlobGas != nil {
- execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas)
+ if vmContext.BlobBaseFee != nil {
+ execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
}
// Re-create statedb instance with new root upon the updated database
diff --git ethereum/go-ethereum/cmd/evm/internal/t8ntool/transition.go taikoxyz/taiko-geth/cmd/evm/internal/t8ntool/transition.go
index cbb39294d04c13f2ff150f93eb8b02e5c730313c..600bc460f726c4344e8611e2c023bda53ad6c4fd 100644
--- ethereum/go-ethereum/cmd/evm/internal/t8ntool/transition.go
+++ taikoxyz/taiko-geth/cmd/evm/internal/t8ntool/transition.go
@@ -192,105 +192,20 @@ }
// Set the chain id
chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))
- var txsWithKeys []*txWithKey
- if txStr != stdinSelector {
- inFile, err := os.Open(txStr)
- if err != nil {
- return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
- }
- defer inFile.Close()
- decoder := json.NewDecoder(inFile)
- if strings.HasSuffix(txStr, ".rlp") {
- var body hexutil.Bytes
- if err := decoder.Decode(&body); err != nil {
- return err
- }
- var txs types.Transactions
- if err := rlp.DecodeBytes(body, &txs); err != nil {
- return err
- }
- for _, tx := range txs {
- txsWithKeys = append(txsWithKeys, &txWithKey{
- key: nil,
- tx: tx,
- })
- }
- } else {
- if err := decoder.Decode(&txsWithKeys); err != nil {
- return NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err))
- }
- }
- } else {
- if len(inputData.TxRlp) > 0 {
- // Decode the body of already signed transactions
- body := common.FromHex(inputData.TxRlp)
- var txs types.Transactions
- if err := rlp.DecodeBytes(body, &txs); err != nil {
- return err
- }
- for _, tx := range txs {
- txsWithKeys = append(txsWithKeys, &txWithKey{
- key: nil,
- tx: tx,
- })
- }
- } else {
- // JSON encoded transactions
- txsWithKeys = inputData.Txs
- }
+ if txs, err = loadTransactions(txStr, inputData, prestate.Env, chainConfig); err != nil {
+ return err
}
- // We may have to sign the transactions.
- signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp)
-
- if txs, err = signUnsignedTransactions(txsWithKeys, signer); err != nil {
- return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err))
+ if err := applyLondonChecks(&prestate.Env, chainConfig); err != nil {
+ return err
}
- // Sanity check, to not `panic` in state_transition
- if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
- if prestate.Env.BaseFee != nil {
- // Already set, base fee has precedent over parent base fee.
- } else if prestate.Env.ParentBaseFee != nil && prestate.Env.Number != 0 {
- parent := &types.Header{
- Number: new(big.Int).SetUint64(prestate.Env.Number - 1),
- BaseFee: prestate.Env.ParentBaseFee,
- GasUsed: prestate.Env.ParentGasUsed,
- GasLimit: prestate.Env.ParentGasLimit,
- }
- prestate.Env.BaseFee = eip1559.CalcBaseFee(chainConfig, parent)
- } else {
- return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
- }
+ if err := applyShanghaiChecks(&prestate.Env, chainConfig); err != nil {
+ return err
}
- if chainConfig.IsShanghai(big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp) && prestate.Env.Withdrawals == nil {
- return NewError(ErrorConfig, errors.New("Shanghai config but missing 'withdrawals' in env section"))
+ if err := applyMergeChecks(&prestate.Env, chainConfig); err != nil {
+ return err
}
- isMerged := chainConfig.TerminalTotalDifficulty != nil && chainConfig.TerminalTotalDifficulty.BitLen() == 0
- env := prestate.Env
- if isMerged {
- // post-merge:
- // - random must be supplied
- // - difficulty must be zero
- switch {
- case env.Random == nil:
- return NewError(ErrorConfig, errors.New("post-merge requires currentRandom to be defined in env"))
- case env.Difficulty != nil && env.Difficulty.BitLen() != 0:
- return NewError(ErrorConfig, errors.New("post-merge difficulty must be zero (or omitted) in env"))
- }
- prestate.Env.Difficulty = nil
- } else if env.Difficulty == nil {
- // pre-merge:
- // If difficulty was not provided by caller, we need to calculate it.
- switch {
- case env.ParentDifficulty == nil:
- return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
- case env.Number == 0:
- return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
- case env.Timestamp <= env.ParentTimestamp:
- return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
- env.Timestamp, env.ParentTimestamp))
- }
- prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
- env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash)
+ if err := applyCancunChecks(&prestate.Env, chainConfig); err != nil {
+ return err
}
// Run the test and aggregate the result
s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
@@ -358,31 +273,147 @@ // To manage this, we read the transactions twice, first trying to read the secretKeys,
// and secondly to read them with the standard tx json format
func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) {
var signedTxs []*types.Transaction
- for i, txWithKey := range txs {
- tx := txWithKey.tx
- key := txWithKey.key
- v, r, s := tx.RawSignatureValues()
- if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 {
- // This transaction needs to be signed
- var (
- signed *types.Transaction
- err error
- )
- if txWithKey.protected {
- signed, err = types.SignTx(tx, signer, key)
- } else {
- signed, err = types.SignTx(tx, types.FrontierSigner{}, key)
+ for i, tx := range txs {
+ var (
+ v, r, s = tx.tx.RawSignatureValues()
+ signed *types.Transaction
+ err error
+ )
+ if tx.key == nil || v.BitLen()+r.BitLen()+s.BitLen() != 0 {
+ // Already signed
+ signedTxs = append(signedTxs, tx.tx)
+ continue
+ }
+ // This transaction needs to be signed
+ if tx.protected {
+ signed, err = types.SignTx(tx.tx, signer, tx.key)
+ } else {
+ signed, err = types.SignTx(tx.tx, types.FrontierSigner{}, tx.key)
+ }
+ if err != nil {
+ return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err))
+ }
+ signedTxs = append(signedTxs, signed)
+ }
+ return signedTxs, nil
+}
+
+func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *params.ChainConfig) (types.Transactions, error) {
+ var txsWithKeys []*txWithKey
+ var signed types.Transactions
+ if txStr != stdinSelector {
+ data, err := os.ReadFile(txStr)
+ if err != nil {
+ return nil, NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
+ }
+ if strings.HasSuffix(txStr, ".rlp") { // A file containing an rlp list
+ var body hexutil.Bytes
+ if err := json.Unmarshal(data, &body); err != nil {
+ return nil, err
}
- if err != nil {
- return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err))
+ // Already signed transactions
+ if err := rlp.DecodeBytes(body, &signed); err != nil {
+ return nil, err
}
- signedTxs = append(signedTxs, signed)
- } else {
- // Already signed
- signedTxs = append(signedTxs, tx)
+ return signed, nil
+ }
+ if err := json.Unmarshal(data, &txsWithKeys); err != nil {
+ return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err))
+ }
+ } else {
+ if len(inputData.TxRlp) > 0 {
+ // Decode the body of already signed transactions
+ body := common.FromHex(inputData.TxRlp)
+ // Already signed transactions
+ if err := rlp.DecodeBytes(body, &signed); err != nil {
+ return nil, err
+ }
+ return signed, nil
+ }
+ // JSON encoded transactions
+ txsWithKeys = inputData.Txs
+ }
+ // We may have to sign the transactions.
+ signer := types.LatestSignerForChainID(chainConfig.ChainID)
+ return signUnsignedTransactions(txsWithKeys, signer)
+}
+
+func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error {
+ if !chainConfig.IsLondon(big.NewInt(int64(env.Number))) {
+ return nil
+ }
+ // Sanity check, to not `panic` in state_transition
+ if env.BaseFee != nil {
+ // Already set, base fee has precedent over parent base fee.
+ return nil
+ }
+ if env.ParentBaseFee == nil || env.Number == 0 {
+ return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
+ }
+ env.BaseFee = eip1559.CalcBaseFee(chainConfig, &types.Header{
+ Number: new(big.Int).SetUint64(env.Number - 1),
+ BaseFee: env.ParentBaseFee,
+ GasUsed: env.ParentGasUsed,
+ GasLimit: env.ParentGasLimit,
+ })
+ return nil
+}
+
+func applyShanghaiChecks(env *stEnv, chainConfig *params.ChainConfig) error {
+ if !chainConfig.IsShanghai(big.NewInt(int64(env.Number)), env.Timestamp) {
+ return nil
+ }
+ if env.Withdrawals == nil {
+ return NewError(ErrorConfig, errors.New("Shanghai config but missing 'withdrawals' in env section"))
+ }
+ return nil
+}
+
+func applyMergeChecks(env *stEnv, chainConfig *params.ChainConfig) error {
+ isMerged := chainConfig.TerminalTotalDifficulty != nil && chainConfig.TerminalTotalDifficulty.BitLen() == 0
+ if !isMerged {
+ // pre-merge: If difficulty was not provided by caller, we need to calculate it.
+ if env.Difficulty != nil {
+ // already set
+ return nil
+ }
+ switch {
+ case env.ParentDifficulty == nil:
+ return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
+ case env.Number == 0:
+ return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
+ case env.Timestamp <= env.ParentTimestamp:
+ return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
+ env.Timestamp, env.ParentTimestamp))
}
+ env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
+ env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash)
+ return nil
}
- return signedTxs, nil
+ // post-merge:
+ // - random must be supplied
+ // - difficulty must be zero
+ switch {
+ case env.Random == nil:
+ return NewError(ErrorConfig, errors.New("post-merge requires currentRandom to be defined in env"))
+ case env.Difficulty != nil && env.Difficulty.BitLen() != 0:
+ return NewError(ErrorConfig, errors.New("post-merge difficulty must be zero (or omitted) in env"))
+ }
+ env.Difficulty = nil
+ return nil
+}
+
+func applyCancunChecks(env *stEnv, chainConfig *params.ChainConfig) error {
+ if !chainConfig.IsCancun(big.NewInt(int64(env.Number)), env.Timestamp) {
+ env.ParentBeaconBlockRoot = nil // un-set it if it has been set too early
+ return nil
+ }
+ // Post-cancun
+ // We require EIP-4788 beacon root to be set in the env
+ if env.ParentBeaconBlockRoot == nil {
+ return NewError(ErrorConfig, errors.New("post-cancun env requires parentBeaconBlockRoot to be set"))
+ }
+ return nil
}
type Alloc map[common.Address]core.GenesisAccount
diff --git ethereum/go-ethereum/cmd/evm/testdata/29/readme.md taikoxyz/taiko-geth/cmd/evm/testdata/29/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..ab02ce9cf8d1bf675ac13125137ecf8b6c0a8a97
--- /dev/null
+++ taikoxyz/taiko-geth/cmd/evm/testdata/29/readme.md
@@ -0,0 +1,29 @@
+## EIP 4788
+
+This test contains testcases for EIP-4788. The 4788-contract is
+located at address `0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02`, and this test executes a simple transaction. It also
+implicitly invokes the system tx, which calls the contract and sets the
+storage values.
+
+```
+$ dir=./testdata/29/ && go run . t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout
+INFO [09-27|15:34:53.049] Trie dumping started root=19a4f8..01573c
+INFO [09-27|15:34:53.049] Trie dumping complete accounts=2 elapsed="192.759µs"
+INFO [09-27|15:34:53.050] Wrote file file=result.json
+{
+ "alloc": {
+ "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
+ "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
+ "storage": {
+ "0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
+ "0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
+ },
+ "balance": "0x1"
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x16345785d871db8",
+ "nonce": "0x1"
+ }
+ }
+}
+```
diff --git ethereum/go-ethereum/cmd/evm/testdata/8/readme.md taikoxyz/taiko-geth/cmd/evm/testdata/8/readme.md
index 4dffdab91ebd75d3806a55b237d70783d56a39c4..85aae189243c551af1efa603db2d5c92ea79974e 100644
--- ethereum/go-ethereum/cmd/evm/testdata/8/readme.md
+++ taikoxyz/taiko-geth/cmd/evm/testdata/8/readme.md
@@ -32,7 +32,7 @@ {"pc":1,"op":84,"gas":"0x484be","gasCost":"0x64","memSize":0,"stack":["0x0"],"depth":1,"refund":0,"opName":"SLOAD"}
{"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memSize":0,"stack":["0x3"],"depth":1,"refund":0,"opName":"SLOAD"}
```
-Simlarly, we can provide the input transactions via `stdin` instead of as file:
+Similarly, we can provide the input transactions via `stdin` instead of as file:
```
$ dir=./testdata/8 \
diff --git ethereum/go-ethereum/cmd/evm/testdata/9/readme.md taikoxyz/taiko-geth/cmd/evm/testdata/9/readme.md
index 539478028869519dda38ceb4907831f52ea4897f..357e200682f6cd3e59a644c1c6083fc8bc54521d 100644
--- ethereum/go-ethereum/cmd/evm/testdata/9/readme.md
+++ taikoxyz/taiko-geth/cmd/evm/testdata/9/readme.md
@@ -1,6 +1,6 @@
## EIP-1559 testing
-This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter.
+This test contains testcases for EIP-1559, which uses a new transaction type and has a new block parameter.
### Prestate
diff --git ethereum/go-ethereum/cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig taikoxyz/taiko-geth/cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig
new file mode 100644
index 0000000000000000000000000000000000000000..eaea9f9053f43037c0702fd8dde0f706459bfec4
--- /dev/null
+++ taikoxyz/taiko-geth/cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig
@@ -0,0 +1,4 @@
+untrusted comment: signature from minisign secret key
+RUQkliYstQBOKLK05Sy5f3bVRMBqJT26ABo6Vbp3BNJAVjejoqYCu4GWE/+7qcDfHBqYIniDCbFIUvYEnOHxV6vZ93wO1xJWDQw=
+trusted comment: timestamp:1693986492 file:data.json hashed
+6Fdw2H+W1ZXK7QXSF77Z5AWC7+AEFAfDmTSxNGylU5HLT1AuSJQmxslj+VjtUBamYCvOuET7plbXza942AlWDw==
diff --git ethereum/go-ethereum/common/types.go taikoxyz/taiko-geth/common/types.go
index bf74e43716a1afe6d844ee48120dc7347f303817..aadca87f82af89543de3387e24a90cba5fe1846f 100644
--- ethereum/go-ethereum/common/types.go
+++ taikoxyz/taiko-geth/common/types.go
@@ -44,6 +44,12 @@
var (
hashT = reflect.TypeOf(Hash{})
addressT = reflect.TypeOf(Address{})
+
+ // MaxAddress represents the maximum possible address value.
+ MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")
+
+ // MaxHash represents the maximum possible hash value.
+ MaxHash = HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
@@ -238,9 +244,6 @@ }
// Bytes gets the string representation of the underlying address.
func (a Address) Bytes() []byte { return a[:] }
-
-// Hash converts an address to a hash by left-padding it with zeros.
-func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Big converts an address to a big integer.
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
diff --git ethereum/go-ethereum/common/types_test.go taikoxyz/taiko-geth/common/types_test.go
index ad892671b5b34e889373d032cc3cd5d3613f0116..cec689ea39f7556e2765d0d98bd09eaf96c4e688 100644
--- ethereum/go-ethereum/common/types_test.go
+++ taikoxyz/taiko-geth/common/types_test.go
@@ -25,6 +25,7 @@ "math/big"
"reflect"
"strings"
"testing"
+ "time"
)
func TestBytesConversion(t *testing.T) {
@@ -583,3 +584,14 @@ if addr != dec {
t.Fatal("Unexpected address after unmarshal")
}
}
+
+func BenchmarkPrettyDuration(b *testing.B) {
+ var x = PrettyDuration(time.Duration(int64(1203123912312)))
+ b.Logf("Pre %s", time.Duration(x).String())
+ var a string
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ a = x.String()
+ }
+ b.Logf("Post %s", a)
+}
diff --git ethereum/go-ethereum/consensus/misc/eip4844/eip4844.go taikoxyz/taiko-geth/consensus/misc/eip4844/eip4844.go
index 583bcdeecd6e369ae6b890cddab071a5a26097da..2dad9a0cd3de18ac8db0dde8ba7101300b11d6d9 100644
--- ethereum/go-ethereum/consensus/misc/eip4844/eip4844.go
+++ taikoxyz/taiko-geth/consensus/misc/eip4844/eip4844.go
@@ -42,8 +42,8 @@ if header.BlobGasUsed == nil {
return errors.New("header is missing blobGasUsed")
}
// Verify that the blob gas used remains within reasonable limits.
- if *header.BlobGasUsed > params.BlobTxMaxBlobGasPerBlock {
- return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.BlobTxMaxBlobGasPerBlock)
+ if *header.BlobGasUsed > params.MaxBlobGasPerBlock {
+ return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.MaxBlobGasPerBlock)
}
if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 {
return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob)
diff --git ethereum/go-ethereum/consensus/misc/eip4844/eip4844_test.go taikoxyz/taiko-geth/consensus/misc/eip4844/eip4844_test.go
index 677cdd252c442875e5bd09a96645e4a2cd640a8d..ec417380fcb083b54b3343aafa72767c9b8cbe06 100644
--- ethereum/go-ethereum/consensus/misc/eip4844/eip4844_test.go
+++ taikoxyz/taiko-geth/consensus/misc/eip4844/eip4844_test.go
@@ -45,14 +45,14 @@
// The excess blob gas should decrease by however much the target was
// under-shot, capped at zero.
{params.BlobTxTargetBlobGasPerBlock, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, params.BlobTxTargetBlobGasPerBlock},
- {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, params.BlobTxBlobGasPerBlob},
- {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 2, 0},
+ {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, params.BlobTxTargetBlobGasPerBlock - params.BlobTxBlobGasPerBlob},
+ {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 2, params.BlobTxTargetBlobGasPerBlock - (2 * params.BlobTxBlobGasPerBlob)},
{params.BlobTxBlobGasPerBlob - 1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, 0},
}
- for _, tt := range tests {
+ for i, tt := range tests {
result := CalcExcessBlobGas(tt.excess, tt.blobs*params.BlobTxBlobGasPerBlob)
if result != tt.want {
- t.Errorf("excess blob gas mismatch: have %v, want %v", result, tt.want)
+ t.Errorf("test %d: excess blob gas mismatch: have %v, want %v", i, result, tt.want)
}
}
}
@@ -63,9 +63,9 @@ excessBlobGas uint64
blobfee int64
}{
{0, 1},
- {1542706, 1},
- {1542707, 2},
- {10 * 1024 * 1024, 111},
+ {2314057, 1},
+ {2314058, 2},
+ {10 * 1024 * 1024, 23},
}
for i, tt := range tests {
have := CalcBlobFee(tt.excessBlobGas)
diff --git ethereum/go-ethereum/core/state/pruner/pruner.go taikoxyz/taiko-geth/core/state/pruner/pruner.go
index 64c4a3a6ebaa7c64c15694439b12192801717399..5acf54f64272014a6c7383afc81c5970b6a0196b 100644
--- ethereum/go-ethereum/core/state/pruner/pruner.go
+++ taikoxyz/taiko-geth/core/state/pruner/pruner.go
@@ -85,13 +85,16 @@ headBlock := rawdb.ReadHeadBlock(db)
if headBlock == nil {
return nil, errors.New("failed to load head block")
}
+ // Offline pruning is only supported in legacy hash based scheme.
+ triedb := trie.NewDatabase(db, trie.HashDefaults)
+
snapconfig := snapshot.Config{
CacheSize: 256,
Recovery: false,
NoBuild: true,
AsyncBuild: false,
}
- snaptree, err := snapshot.New(snapconfig, db, trie.NewDatabase(db), headBlock.Root())
+ snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Root())
if err != nil {
return nil, err // The relevant snapshot(s) might not exist
}
@@ -361,7 +364,9 @@ Recovery: true,
NoBuild: true,
AsyncBuild: false,
}
- snaptree, err := snapshot.New(snapconfig, db, trie.NewDatabase(db), headBlock.Root())
+ // Offline pruning is only supported in legacy hash based scheme.
+ triedb := trie.NewDatabase(db, trie.HashDefaults)
+ snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Root())
if err != nil {
return err // The relevant snapshot(s) might not exist
}
@@ -403,7 +408,7 @@ genesis := rawdb.ReadBlock(db, genesisHash, 0)
if genesis == nil {
return errors.New("missing genesis block")
}
- t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db))
+ t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db, trie.HashDefaults))
if err != nil {
return err
}
@@ -427,7 +432,7 @@ return err
}
if acc.Root != types.EmptyRootHash {
id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root)
- storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db))
+ storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db, trie.HashDefaults))
if err != nil {
return err
}
diff --git ethereum/go-ethereum/core/state/snapshot/conversion.go taikoxyz/taiko-geth/core/state/snapshot/conversion.go
index 1e683f76ce04f5359c7022c0edc51349d3ef57f7..321bfbc6a2dc1165aeca05eacf101ae070da9432 100644
--- ethereum/go-ethereum/core/state/snapshot/conversion.go
+++ taikoxyz/taiko-geth/core/state/snapshot/conversion.go
@@ -364,11 +364,11 @@
func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) {
var nodeWriter trie.NodeWriteFunc
if db != nil {
- nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ nodeWriter = func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme)
}
}
- t := trie.NewStackTrieWithOwner(nodeWriter, owner)
+ t := trie.NewStackTrie(nodeWriter)
for leaf := range in {
t.Update(leaf.key[:], leaf.value)
}
diff --git ethereum/go-ethereum/core/state/snapshot/generate.go taikoxyz/taiko-geth/core/state/snapshot/generate.go
index 0a85f0006d87bb65a4190d5d53fd089d3076128e..204584c956eabb73c0457d318d7af27b3119b5c7 100644
--- ethereum/go-ethereum/core/state/snapshot/generate.go
+++ taikoxyz/taiko-geth/core/state/snapshot/generate.go
@@ -247,11 +247,6 @@ if err != nil {
ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
return nil, errMissingTrie
}
- // Firstly find out the key of last iterated element.
- var last []byte
- if len(keys) > 0 {
- last = keys[len(keys)-1]
- }
// Generate the Merkle proofs for the first and last element
if origin == nil {
origin = common.Hash{}.Bytes()
@@ -266,9 +261,9 @@ proofErr: err,
tr: tr,
}, nil
}
- if last != nil {
- if err := tr.Prove(last, proof); err != nil {
- log.Debug("Failed to prove range", "kind", kind, "last", last, "err", err)
+ if len(keys) > 0 {
+ if err := tr.Prove(keys[len(keys)-1], proof); err != nil {
+ log.Debug("Failed to prove range", "kind", kind, "last", keys[len(keys)-1], "err", err)
return &proofResult{
keys: keys,
vals: vals,
@@ -280,7 +275,7 @@ }
}
// Verify the snapshot segment with range prover, ensure that all flat states
// in this range correspond to merkle trie.
- cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
+ cont, err := trie.VerifyRangeProof(root, origin, keys, vals, proof)
return &proofResult{
keys: keys,
vals: vals,
@@ -356,7 +351,8 @@ // main account trie as a primary lookup when resolving hashes
var resolver trie.NodeResolver
if len(result.keys) > 0 {
mdb := rawdb.NewMemoryDatabase()
- tdb := trie.NewDatabase(mdb)
+ tdb := trie.NewDatabase(mdb, trie.HashDefaults)
+ defer tdb.Close()
snapTrie := trie.NewEmpty(tdb)
for i, key := range result.keys {
snapTrie.Update(key, result.vals[i])
@@ -445,6 +441,10 @@ }
internal += time.Since(istart)
}
if iter.Err != nil {
+ // Trie errors should never happen. Still, in case of a bug, expose the
+ // error here, as the outer code will presume errors are interrupts, not
+ // some deeper issues.
+ log.Error("State snapshotter failed to iterate trie", "err", err)
return false, nil, iter.Err
}
// Delete all stale snapshot states remaining
diff --git ethereum/go-ethereum/core/state/snapshot/generate_test.go taikoxyz/taiko-geth/core/state/snapshot/generate_test.go
index c503676241f903340626052405e1112dae3c1933..07016b675ce854ff07f541e51647a2614391219d 100644
--- ethereum/go-ethereum/core/state/snapshot/generate_test.go
+++ taikoxyz/taiko-geth/core/state/snapshot/generate_test.go
@@ -30,6 +30,8 @@ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -45,10 +47,15 @@ }
// Tests that snapshot generation from an empty database.
func TestGeneration(t *testing.T) {
+ testGeneration(t, rawdb.HashScheme)
+ testGeneration(t, rawdb.PathScheme)
+}
+
+func testGeneration(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also has the same 3-slot storage trie attached.
- var helper = newHelper()
+ var helper = newHelper(scheme)
stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
@@ -79,10 +86,15 @@ }
// Tests that snapshot generation with existent flat state.
func TestGenerateExistentState(t *testing.T) {
+ testGenerateExistentState(t, rawdb.HashScheme)
+ testGenerateExistentState(t, rawdb.PathScheme)
+}
+
+func testGenerateExistentState(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also has the same 3-slot storage trie attached.
- var helper = newHelper()
+ var helper = newHelper(scheme)
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
@@ -148,9 +160,15 @@ accTrie *trie.StateTrie
nodes *trienode.MergedNodeSet
}
-func newHelper() *testHelper {
+func newHelper(scheme string) *testHelper {
diskdb := rawdb.NewMemoryDatabase()
- triedb := trie.NewDatabase(diskdb)
+ config := &trie.Config{}
+ if scheme == rawdb.PathScheme {
+ config.PathDB = &pathdb.Config{} // disable caching
+ } else {
+ config.HashDB = &hashdb.Config{} // disable caching
+ }
+ triedb := trie.NewDatabase(diskdb, config)
accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb)
return &testHelper{
diskdb: diskdb,
@@ -233,7 +251,12 @@ // - extra slots in the beginning
// - extra slots in the middle
// - extra slots in the end
func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
- helper := newHelper()
+ testGenerateExistentStateWithWrongStorage(t, rawdb.HashScheme)
+ testGenerateExistentStateWithWrongStorage(t, rawdb.PathScheme)
+}
+
+func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) {
+ helper := newHelper(scheme)
// Account one, empty root but non-empty database
helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
@@ -325,7 +348,12 @@ // - miss accounts
// - wrong accounts
// - extra accounts
func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
- helper := newHelper()
+ testGenerateExistentStateWithWrongAccounts(t, rawdb.HashScheme)
+ testGenerateExistentStateWithWrongAccounts(t, rawdb.PathScheme)
+}
+
+func testGenerateExistentStateWithWrongAccounts(t *testing.T, scheme string) {
+ helper := newHelper(scheme)
helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
@@ -380,10 +408,15 @@
// Tests that snapshot generation errors out correctly in case of a missing trie
// node in the account trie.
func TestGenerateCorruptAccountTrie(t *testing.T) {
+ testGenerateCorruptAccountTrie(t, rawdb.HashScheme)
+ testGenerateCorruptAccountTrie(t, rawdb.PathScheme)
+}
+
+func testGenerateCorruptAccountTrie(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// without any storage slots to keep the test smaller.
- helper := newHelper()
+ helper := newHelper(scheme)
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
@@ -391,9 +424,11 @@ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
- // Delete an account trie leaf and ensure the generator chokes
- helper.triedb.Commit(root, false)
- helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())
+ // Delete an account trie node and ensure the generator chokes
+ targetPath := []byte{0xc}
+ targetHash := common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7")
+
+ rawdb.DeleteTrieNode(helper.diskdb, common.Hash{}, targetPath, targetHash, scheme)
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
@@ -414,11 +449,19 @@ // Tests that snapshot generation errors out correctly in case of a missing root
// trie node for a storage trie. It's similar to internal corruption but it is
// handled differently inside the generator.
func TestGenerateMissingStorageTrie(t *testing.T) {
+ testGenerateMissingStorageTrie(t, rawdb.HashScheme)
+ testGenerateMissingStorageTrie(t, rawdb.PathScheme)
+}
+
+func testGenerateMissingStorageTrie(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also has the same 3-slot storage trie attached.
- helper := newHelper()
-
+ var (
+ acc1 = hashData([]byte("acc-1"))
+ acc3 = hashData([]byte("acc-3"))
+ helper = newHelper(scheme)
+ )
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
@@ -427,8 +470,9 @@ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
root := helper.Commit()
- // Delete a storage trie root and ensure the generator chokes
- helper.diskdb.Delete(stRoot.Bytes())
+ // Delete storage trie root of account one and three.
+ rawdb.DeleteTrieNode(helper.diskdb, acc1, nil, stRoot, scheme)
+ rawdb.DeleteTrieNode(helper.diskdb, acc3, nil, stRoot, scheme)
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
@@ -448,10 +492,15 @@
// Tests that snapshot generation errors out correctly in case of a missing trie
// node in a storage trie.
func TestGenerateCorruptStorageTrie(t *testing.T) {
+ testGenerateCorruptStorageTrie(t, rawdb.HashScheme)
+ testGenerateCorruptStorageTrie(t, rawdb.PathScheme)
+}
+
+func testGenerateCorruptStorageTrie(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also has the same 3-slot storage trie attached.
- helper := newHelper()
+ helper := newHelper(scheme)
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
@@ -461,8 +510,11 @@ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
root := helper.Commit()
- // Delete a storage trie leaf and ensure the generator chokes
- helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
+ // Delete a node in the storage trie.
+ targetPath := []byte{0x4}
+ targetHash := common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371")
+ rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-1")), targetPath, targetHash, scheme)
+ rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-3")), targetPath, targetHash, scheme)
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
@@ -481,7 +533,12 @@ }
// Tests that snapshot generation when an extra account with storage exists in the snap state.
func TestGenerateWithExtraAccounts(t *testing.T) {
- helper := newHelper()
+ testGenerateWithExtraAccounts(t, rawdb.HashScheme)
+ testGenerateWithExtraAccounts(t, rawdb.PathScheme)
+}
+
+func testGenerateWithExtraAccounts(t *testing.T, scheme string) {
+ helper := newHelper(scheme)
{
// Account one in the trie
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")),
@@ -549,10 +606,15 @@ }
// Tests that snapshot generation when an extra account with storage exists in the snap state.
func TestGenerateWithManyExtraAccounts(t *testing.T) {
+ testGenerateWithManyExtraAccounts(t, rawdb.HashScheme)
+ testGenerateWithManyExtraAccounts(t, rawdb.PathScheme)
+}
+
+func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) {
if false {
enableLogging()
}
- helper := newHelper()
+ helper := newHelper(scheme)
{
// Account one in the trie
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")),
@@ -605,11 +667,16 @@ // The trie has a lot of deletions.
// So in trie, we iterate 2 entries 0x03, 0x07. We create the 0x07 in the database and abort the procedure, because the trie is exhausted.
// But in the database, we still have the stale storage slots 0x04, 0x05. They are not iterated yet, but the procedure is finished.
func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
+ testGenerateWithExtraBeforeAndAfter(t, rawdb.HashScheme)
+ testGenerateWithExtraBeforeAndAfter(t, rawdb.PathScheme)
+}
+
+func testGenerateWithExtraBeforeAndAfter(t *testing.T, scheme string) {
accountCheckRange = 3
if false {
enableLogging()
}
- helper := newHelper()
+ helper := newHelper(scheme)
{
acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
@@ -642,11 +709,16 @@
// TestGenerateWithMalformedSnapdata tests what happes if we have some junk
// in the snapshot database, which cannot be parsed back to an account
func TestGenerateWithMalformedSnapdata(t *testing.T) {
+ testGenerateWithMalformedSnapdata(t, rawdb.HashScheme)
+ testGenerateWithMalformedSnapdata(t, rawdb.PathScheme)
+}
+
+func testGenerateWithMalformedSnapdata(t *testing.T, scheme string) {
accountCheckRange = 3
if false {
enableLogging()
}
- helper := newHelper()
+ helper := newHelper(scheme)
{
acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
@@ -679,10 +751,15 @@ }
}
func TestGenerateFromEmptySnap(t *testing.T) {
+ testGenerateFromEmptySnap(t, rawdb.HashScheme)
+ testGenerateFromEmptySnap(t, rawdb.PathScheme)
+}
+
+func testGenerateFromEmptySnap(t *testing.T, scheme string) {
//enableLogging()
accountCheckRange = 10
storageCheckRange = 20
- helper := newHelper()
+ helper := newHelper(scheme)
// Add 1K accounts to the trie
for i := 0; i < 400; i++ {
stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
@@ -714,8 +791,13 @@ // trie: 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
// This hits a case where the snap verification passes, but there are more elements in the trie
// which we must also add.
func TestGenerateWithIncompleteStorage(t *testing.T) {
+ testGenerateWithIncompleteStorage(t, rawdb.HashScheme)
+ testGenerateWithIncompleteStorage(t, rawdb.PathScheme)
+}
+
+func testGenerateWithIncompleteStorage(t *testing.T, scheme string) {
storageCheckRange = 4
- helper := newHelper()
+ helper := newHelper(scheme)
stKeys := []string{"1", "2", "3", "4", "5", "6", "7", "8"}
stVals := []string{"v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"}
// We add 8 accounts, each one is missing exactly one of the storage slots. This means
@@ -813,7 +895,12 @@ // the storage data is existent while the corresponding account data is missing.
//
// This test will populate some dangling storages to see if they can be cleaned up.
func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
- var helper = newHelper()
+ testGenerateCompleteSnapshotWithDanglingStorage(t, rawdb.HashScheme)
+ testGenerateCompleteSnapshotWithDanglingStorage(t, rawdb.PathScheme)
+}
+
+func testGenerateCompleteSnapshotWithDanglingStorage(t *testing.T, scheme string) {
+ var helper = newHelper(scheme)
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
@@ -848,7 +935,12 @@ // the storage data is existent while the corresponding account data is missing.
//
// This test will populate some dangling storages to see if they can be cleaned up.
func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) {
- var helper = newHelper()
+ testGenerateBrokenSnapshotWithDanglingStorage(t, rawdb.HashScheme)
+ testGenerateBrokenSnapshotWithDanglingStorage(t, rawdb.PathScheme)
+}
+
+func testGenerateBrokenSnapshotWithDanglingStorage(t *testing.T, scheme string) {
+ var helper = newHelper(scheme)
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
diff --git ethereum/go-ethereum/core/state/snapshot/snapshot.go taikoxyz/taiko-geth/core/state/snapshot/snapshot.go
index efc0fc26afdf15f52cd0ed6971cca8da774ed743..e30a0005ccce86abfb9d6c6c07ad0f493ec7608f 100644
--- ethereum/go-ethereum/core/state/snapshot/snapshot.go
+++ taikoxyz/taiko-geth/core/state/snapshot/snapshot.go
@@ -564,7 +564,7 @@
// Ensure we don't delete too much data blindly (contract can be
// huge). It's ok to flush, the root will go missing in case of a
// crash and we'll detect and regenerate the snapshot.
- if batch.ValueSize() > ethdb.IdealBatchSize {
+ if batch.ValueSize() > 64*1024*1024 {
if err := batch.Write(); err != nil {
log.Crit("Failed to write storage deletions", "err", err)
}
@@ -590,7 +590,7 @@
// Ensure we don't write too much data blindly. It's ok to flush, the
// root will go missing in case of a crash and we'll detect and regen
// the snapshot.
- if batch.ValueSize() > ethdb.IdealBatchSize {
+ if batch.ValueSize() > 64*1024*1024 {
if err := batch.Write(); err != nil {
log.Crit("Failed to write storage deletions", "err", err)
}
@@ -852,3 +852,21 @@ defer t.lock.Unlock()
return t.diskRoot()
}
+
+// Size returns the memory usage of the diff layers above the disk layer and the
+// dirty nodes buffered in the disk layer. Currently, the implementation uses a
+// special diff layer (the first) as an aggregator simulating a dirty buffer, so
+// the second return will always be 0. However, this will be made consistent with
+// the pathdb, which will require a second return.
+func (t *Tree) Size() (diffs common.StorageSize, buf common.StorageSize) {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ var size common.StorageSize
+ for _, layer := range t.layers {
+ if layer, ok := layer.(*diffLayer); ok {
+ size += common.StorageSize(layer.memory)
+ }
+ }
+ return size, 0
+}
diff --git ethereum/go-ethereum/core/txpool/blobpool/blobpool.go taikoxyz/taiko-geth/core/txpool/blobpool/blobpool.go
index c0dd6e8acc90fc19eca11d2a423736fe49f36b29..32c6c0e8feeff9db0bf2ebaedb9869decc82b336 100644
--- ethereum/go-ethereum/core/txpool/blobpool/blobpool.go
+++ taikoxyz/taiko-geth/core/txpool/blobpool/blobpool.go
@@ -19,6 +19,7 @@ package blobpool
import (
"container/heap"
+ "errors"
"fmt"
"math"
"math/big"
@@ -35,7 +36,6 @@ "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -53,7 +53,7 @@
// maxBlobsPerTransaction is the maximum number of blobs a single transaction
// is allowed to contain. Whilst the spec states it's unlimited, the block
// data slots are protocol bound, which implicitly also limit this.
- maxBlobsPerTransaction = params.BlobTxMaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob
+ maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob
// txAvgSize is an approximate byte size of a transaction metadata to avoid
// tiny overflows causing all txs to move a shelf higher, wasting disk space.
@@ -83,16 +83,6 @@ // but not yet finalized transaction blobs.
limboedTransactionStore = "limbo"
)
-// blobTx is a wrapper around types.BlobTx which also contains the literal blob
-// data along with all the transaction metadata.
-type blobTx struct {
- Tx *types.Transaction
-
- Blobs []kzg4844.Blob
- Commits []kzg4844.Commitment
- Proofs []kzg4844.Proof
-}
-
// blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
// schedule the blob transactions into the following blocks. Only ever add the
// bare minimum needed fields to keep the size down (and thus number of entries
@@ -107,6 +97,8 @@ costCap *uint256.Int // Needed to validate cumulative balance sufficiency
execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump
execFeeCap *uint256.Int // Needed to validate replacement price bump
blobFeeCap *uint256.Int // Needed to validate replacement price bump
+ execGas uint64 // Needed to check inclusion validity before reading the blob
+ blobGas uint64 // Needed to check inclusion validity before reading the blob
basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap
blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap
@@ -128,6 +120,8 @@ costCap: uint256.MustFromBig(tx.Cost()),
execTipCap: uint256.MustFromBig(tx.GasTipCap()),
execFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()),
+ execGas: tx.Gas(),
+ blobGas: tx.BlobGas(),
}
meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
@@ -317,8 +311,8 @@ index map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce
spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts
evict *evictHeap // Heap of cheapest accounts for eviction when full
- eventFeed event.Feed // Event feed to send out new tx events on pool inclusion
- eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination
+ discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded)
+ insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)
lock sync.RWMutex // Mutex protecting the pool during reorg handling
}
@@ -365,7 +359,13 @@ if err := os.MkdirAll(limbodir, 0700); err != nil {
return err
}
}
+ // Initialize the state with head block, or fallback to empty one in
+ // case the head state is not available (might occur when node is not
+ // fully synced).
state, err := p.chain.StateAt(head.Root)
+ if err != nil {
+ state, err = p.chain.StateAt(types.EmptyRootHash)
+ }
if err != nil {
return err
}
@@ -440,8 +440,6 @@ }
if err := p.store.Close(); err != nil {
errs = append(errs, err)
}
- p.eventScope.Close()
-
switch {
case errs == nil:
return nil
@@ -455,22 +453,27 @@
// parseTransaction is a callback method on pool creation that gets called for
// each transaction on disk to create the in-memory metadata index.
func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
- item := new(blobTx)
- if err := rlp.DecodeBytes(blob, item); err != nil {
+ tx := new(types.Transaction)
+ if err := rlp.DecodeBytes(blob, tx); err != nil {
// This path is impossible unless the disk data representation changes
// across restarts. For that ever unprobable case, recover gracefully
// by ignoring this data entry.
log.Error("Failed to decode blob pool entry", "id", id, "err", err)
return err
}
- meta := newBlobTxMeta(id, size, item.Tx)
+ if tx.BlobTxSidecar() == nil {
+ log.Error("Missing sidecar in blob pool entry", "id", id, "hash", tx.Hash())
+ return errors.New("missing blob sidecar")
+ }
+
+ meta := newBlobTxMeta(id, size, tx)
- sender, err := p.signer.Sender(item.Tx)
+ sender, err := p.signer.Sender(tx)
if err != nil {
// This path is impossible unless the signature validity changes across
// restarts. For that ever unprobable case, recover gracefully by ignoring
// this data entry.
- log.Error("Failed to recover blob tx sender", "id", id, "hash", item.Tx.Hash(), "err", err)
+ log.Error("Failed to recover blob tx sender", "id", id, "hash", tx.Hash(), "err", err)
return err
}
if _, ok := p.index[sender]; !ok {
@@ -718,17 +721,17 @@ if err != nil {
log.Error("Blobs missing for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
return
}
- item := new(blobTx)
- if err = rlp.DecodeBytes(data, item); err != nil {
+ var tx types.Transaction
+ if err = rlp.DecodeBytes(data, &tx); err != nil {
log.Error("Blobs corrupted for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
return
}
- block, ok := inclusions[item.Tx.Hash()]
+ block, ok := inclusions[tx.Hash()]
if !ok {
log.Warn("Blob transaction swapped out by signer", "from", addr, "nonce", nonce, "id", id)
return
}
- if err := p.limbo.push(item.Tx.Hash(), block, item.Blobs, item.Commits, item.Proofs); err != nil {
+ if err := p.limbo.push(&tx, block); err != nil {
log.Warn("Failed to offload blob tx into limbo", "err", err)
return
}
@@ -757,14 +760,20 @@
// Run the reorg between the old and new head and figure out which accounts
// need to be rechecked and which transactions need to be readded
if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
+ var adds []*types.Transaction
for addr, txs := range reinject {
// Blindly push all the lost transactions back into the pool
for _, tx := range txs {
- p.reinject(addr, tx)
+ if err := p.reinject(addr, tx.Hash()); err == nil {
+ adds = append(adds, tx.WithoutBlobTxSidecar())
+ }
}
// Recheck the account's pooled transactions to drop included and
// invalidated one
p.recheck(addr, inclusions)
+ }
+ if len(adds) > 0 {
+ p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
}
}
// Flush out any blobs from limbo that are older than the latest finality
@@ -920,32 +929,35 @@ //
// Note, the method will not initialize the eviction cache values as those will
// be done once for all transactions belonging to an account after all individual
// transactions are injected back into the pool.
-func (p *BlobPool) reinject(addr common.Address, tx *types.Transaction) {
+func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
// Retrieve the associated blob from the limbo. Without the blobs, we cannot
// add the transaction back into the pool as it is not mineable.
- blobs, commits, proofs, err := p.limbo.pull(tx.Hash())
+ tx, err := p.limbo.pull(txhash)
if err != nil {
log.Error("Blobs unavailable, dropping reorged tx", "err", err)
- return
+ return err
}
- // Serialize the transaction back into the primary datastore
- blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs})
+ // TODO: seems like an easy optimization here would be getting the serialized tx
+ // from limbo instead of re-serializing it here.
+
+ // Serialize the transaction back into the primary datastore.
+ blob, err := rlp.EncodeToBytes(tx)
if err != nil {
log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
- return
+ return err
}
id, err := p.store.Put(blob)
if err != nil {
log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
- return
+ return err
}
+
// Update the indixes and metrics
meta := newBlobTxMeta(id, p.store.Size(id), tx)
-
if _, ok := p.index[addr]; !ok {
if err := p.reserve(addr, true); err != nil {
log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
- return
+ return err
}
p.index[addr] = []*blobTxMeta{meta}
p.spent[addr] = meta.costCap
@@ -956,6 +968,7 @@ p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
}
p.lookup[meta.hash] = meta.id
p.stored += uint64(meta.size)
+ return nil
}
// SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
@@ -1023,7 +1036,7 @@ }
// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
-func (p *BlobPool) validateTx(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+func (p *BlobPool) validateTx(tx *types.Transaction) error {
// Ensure the transaction adheres to basic pool filters (type, size, tip) and
// consensus rules
baseOpts := &txpool.ValidationOptions{
@@ -1032,7 +1045,7 @@ Accept: 1 << types.BlobTxType,
MaxSize: txMaxSize,
MinTip: p.gasTip.ToBig(),
}
- if err := txpool.ValidateTransaction(tx, blobs, commits, proofs, p.head, p.signer, baseOpts); err != nil {
+ if err := txpool.ValidateTransaction(tx, p.head, p.signer, baseOpts); err != nil {
return err
}
// Ensure the transaction adheres to the stateful pool filters (nonce, balance)
@@ -1117,7 +1130,7 @@ return ok
}
// Get returns a transaction if it is contained in the pool, or nil otherwise.
-func (p *BlobPool) Get(hash common.Hash) *txpool.Transaction {
+func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
// Track the amount of time waiting to retrieve a fully resolved blob tx from
// the pool and the amount of time actually spent on pulling the data from disk.
getStart := time.Now()
@@ -1139,32 +1152,37 @@ if err != nil {
log.Error("Tracked blob transaction missing from store", "hash", hash, "id", id, "err", err)
return nil
}
- item := new(blobTx)
+ item := new(types.Transaction)
if err = rlp.DecodeBytes(data, item); err != nil {
log.Error("Blobs corrupted for traced transaction", "hash", hash, "id", id, "err", err)
return nil
}
- return &txpool.Transaction{
- Tx: item.Tx,
- BlobTxBlobs: item.Blobs,
- BlobTxCommits: item.Commits,
- BlobTxProofs: item.Proofs,
- }
+ return item
}
// Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restictions).
-func (p *BlobPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
- errs := make([]error, len(txs))
+func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
+ var (
+ adds = make([]*types.Transaction, 0, len(txs))
+ errs = make([]error, len(txs))
+ )
for i, tx := range txs {
- errs[i] = p.add(tx.Tx, tx.BlobTxBlobs, tx.BlobTxCommits, tx.BlobTxProofs)
+ errs[i] = p.add(tx)
+ if errs[i] == nil {
+ adds = append(adds, tx.WithoutBlobTxSidecar())
+ }
+ }
+ if len(adds) > 0 {
+ p.discoverFeed.Send(core.NewTxsEvent{Txs: adds})
+ p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
}
return errs
}
// Add inserts a new blob transaction into the pool if it passes validation (both
// consensus validity and pool restictions).
-func (p *BlobPool) add(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) (err error) {
+func (p *BlobPool) add(tx *types.Transaction) (err error) {
// The blob pool blocks on adding a transaction. This is because blob txs are
// only even pulled form the network, so this method will act as the overload
// protection for fetches.
@@ -1178,7 +1196,7 @@ addtimeHist.Update(time.Since(start).Nanoseconds())
}(time.Now())
// Ensure the transaction is valid from all perspectives
- if err := p.validateTx(tx, blobs, commits, proofs); err != nil {
+ if err := p.validateTx(tx); err != nil {
log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
return err
}
@@ -1203,7 +1221,7 @@ }()
}
// Transaction permitted into the pool from a nonce and cost perspective,
// insert it into the database and update the indices
- blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs})
+ blob, err := rlp.EncodeToBytes(tx)
if err != nil {
log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
return err
@@ -1385,6 +1403,8 @@ Hash: tx.hash,
Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
GasFeeCap: tx.execFeeCap.ToBig(),
GasTipCap: tx.execTipCap.ToBig(),
+ Gas: tx.execGas,
+ BlobGas: tx.blobGas,
})
}
if len(lazies) > 0 {
@@ -1469,10 +1489,14 @@ limboDatarealGauge.Update(int64(datareal))
limboSlotusedGauge.Update(int64(slotused))
}
-// SubscribeTransactions registers a subscription of NewTxsEvent and
-// starts sending event to the given channel.
-func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
- return p.eventScope.Track(p.eventFeed.Subscribe(ch))
+// SubscribeTransactions registers a subscription for new transaction events,
+// supporting feeding only newly seen or also resurrected transactions.
+func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
+ if reorgs {
+ return p.insertFeed.Subscribe(ch)
+ } else {
+ return p.discoverFeed.Subscribe(ch)
+ }
}
// Nonce returns the next nonce of an account, with all transactions executable
diff --git ethereum/go-ethereum/core/txpool/blobpool/blobpool_test.go taikoxyz/taiko-geth/core/txpool/blobpool/blobpool_test.go
index 78a5039b5b13d256b122abd91428d2eccf4ee683..8914301e14c32648498077d51ee1d3ddc5484734 100644
--- ethereum/go-ethereum/core/txpool/blobpool/blobpool_test.go
+++ taikoxyz/taiko-geth/core/txpool/blobpool/blobpool_test.go
@@ -193,8 +193,8 @@ // makeTx is a utility method to construct a random blob transaction and sign it
// with a valid key, only setting the interesting fields from the perspective of
// the blob pool.
func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, key *ecdsa.PrivateKey) *types.Transaction {
- tx, _ := types.SignNewTx(key, types.LatestSigner(testChainConfig), makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap))
- return tx
+ blobtx := makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap)
+ return types.MustSignNewTx(key, types.LatestSigner(testChainConfig), blobtx)
}
// makeUnsignedTx is a utility method to construct a random blob tranasaction
@@ -209,6 +209,11 @@ Gas: 21000,
BlobFeeCap: uint256.NewInt(blobFeeCap),
BlobHashes: []common.Hash{emptyBlobVHash},
Value: uint256.NewInt(100),
+ Sidecar: &types.BlobTxSidecar{
+ Blobs: []kzg4844.Blob{emptyBlob},
+ Commitments: []kzg4844.Commitment{emptyBlobCommit},
+ Proofs: []kzg4844.Proof{emptyBlobProof},
+ },
}
}
@@ -341,7 +346,7 @@ V: new(uint256.Int),
R: new(uint256.Int),
S: new(uint256.Int),
})
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
badsig, _ := store.Put(blob)
// Insert a sequence of transactions with a nonce gap in between to verify
@@ -354,7 +359,7 @@ gapped = make(map[uint64]struct{})
)
for _, nonce := range []uint64{0, 1, 3, 4, 6, 7} { // first gap at #2, another at #5
tx := makeTx(nonce, 1, 1, 1, gapper)
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
id, _ := store.Put(blob)
if nonce < 2 {
@@ -371,7 +376,7 @@ dangling = make(map[uint64]struct{})
)
for _, nonce := range []uint64{1, 2, 3} { // first gap at #0, all set dangling
tx := makeTx(nonce, 1, 1, 1, dangler)
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
id, _ := store.Put(blob)
dangling[id] = struct{}{}
@@ -384,7 +389,7 @@ filled = make(map[uint64]struct{})
)
for _, nonce := range []uint64{0, 1, 2} { // account nonce at 3, all set filled
tx := makeTx(nonce, 1, 1, 1, filler)
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
id, _ := store.Put(blob)
filled[id] = struct{}{}
@@ -397,7 +402,7 @@ overlapped = make(map[uint64]struct{})
)
for _, nonce := range []uint64{0, 1, 2, 3} { // account nonce at 2, half filled
tx := makeTx(nonce, 1, 1, 1, overlapper)
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
id, _ := store.Put(blob)
if nonce >= 2 {
@@ -419,7 +424,7 @@ tx = makeTx(uint64(i), 0, 0, 0, underpayer)
} else {
tx = makeTx(uint64(i), 1, 1, 1, underpayer)
}
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
id, _ := store.Put(blob)
underpaid[id] = struct{}{}
@@ -438,7 +443,7 @@ tx = makeTx(uint64(i), 0, 0, 0, outpricer)
} else {
tx = makeTx(uint64(i), 1, 1, 1, outpricer)
}
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
id, _ := store.Put(blob)
if i < 2 {
@@ -460,7 +465,7 @@ tx = makeTx(nonce, 1, 100, 1, exceeder)
} else {
tx = makeTx(nonce, 1, 1, 1, exceeder)
}
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
id, _ := store.Put(blob)
exceeded[id] = struct{}{}
@@ -478,7 +483,7 @@ tx = makeTx(nonce, 1, 100, 1, overdrafter)
} else {
tx = makeTx(nonce, 1, 1, 1, overdrafter)
}
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
id, _ := store.Put(blob)
if nonce < 1 {
@@ -494,7 +499,7 @@ overcapper, _ = crypto.GenerateKey()
overcapped = make(map[uint64]struct{})
)
for nonce := uint64(0); nonce < maxTxsPerAccount+3; nonce++ {
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: makeTx(nonce, 1, 1, 1, overcapper)})
+ blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, 1, 1, overcapper))
id, _ := store.Put(blob)
if nonce < maxTxsPerAccount {
@@ -625,7 +630,7 @@ totalSpent = uint256.NewInt(21000*(100+90+200+10+80+300) + blobSize*(55+66+77+33+22+11) + 100*6) // 21000 gas x price + 128KB x blobprice + value
)
for _, i := range []int{5, 3, 4, 2, 0, 1} { // Randomize the tx insertion order to force sorting on load
tx := makeTx(uint64(i), txExecTipCaps[i], txExecFeeCaps[i], txBlobFeeCaps[i], key)
- blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
+ blob, _ := rlp.EncodeToBytes(tx)
store.Put(blob)
}
store.Close()
@@ -718,9 +723,9 @@ tx1 = makeTx(0, 1, 1000, 90, key1)
tx2 = makeTx(0, 1, 800, 70, key2)
tx3 = makeTx(0, 1, 1500, 110, key3)
- blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1})
- blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2})
- blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3})
+ blob1, _ = rlp.EncodeToBytes(tx1)
+ blob2, _ = rlp.EncodeToBytes(tx2)
+ blob3, _ = rlp.EncodeToBytes(tx3)
heapOrder = []common.Address{addr2, addr1, addr3}
heapIndex = map[common.Address]int{addr2: 0, addr1: 1, addr3: 2}
@@ -794,9 +799,9 @@ tx1 = makeTx(0, 1, 1000, 100, key1)
tx2 = makeTx(0, 1, 800, 70, key2)
tx3 = makeTx(0, 1, 1500, 110, key3)
- blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
- blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
- blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
+ blob1, _ = rlp.EncodeToBytes(tx1)
+ blob2, _ = rlp.EncodeToBytes(tx2)
+ blob3, _ = rlp.EncodeToBytes(tx3)
keep = []common.Address{addr1, addr3}
drop = []common.Address{addr2}
@@ -1210,10 +1215,8 @@ statedb.SetNonce(addrs[acc], seed.nonce)
// Sign the seed transactions and store them in the data store
for _, tx := range seed.txs {
- var (
- signed, _ = types.SignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
- blob, _ = rlp.EncodeToBytes(&blobTx{Tx: signed, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
- )
+ signed := types.MustSignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
+ blob, _ := rlp.EncodeToBytes(signed)
store.Put(blob)
}
}
@@ -1236,7 +1239,7 @@
// Add each transaction one by one, verifying the pool internals in between
for j, add := range tt.adds {
signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
- if err := pool.add(signed, []kzg4844.Blob{emptyBlob}, []kzg4844.Commitment{emptyBlobCommit}, []kzg4844.Proof{emptyBlobProof}); !errors.Is(err, add.err) {
+ if err := pool.add(signed); !errors.Is(err, add.err) {
t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
}
verifyPoolInternals(t, pool)
diff --git ethereum/go-ethereum/core/txpool/blobpool/evictheap.go taikoxyz/taiko-geth/core/txpool/blobpool/evictheap.go
index 7607a911c15bf1540ec4eb0d2f0e49d3175a5d1d..df594099f79b655ac9785f2dac6750d3909f98b1 100644
--- ethereum/go-ethereum/core/txpool/blobpool/evictheap.go
+++ taikoxyz/taiko-geth/core/txpool/blobpool/evictheap.go
@@ -44,7 +44,7 @@ addrs []common.Address // Heap of addresses to retrieve the cheapest out of
index map[common.Address]int // Indices into the heap for replacements
}
-// newPriceHeap creates a new heap of cheapets accounts in the blob pool to evict
+// newPriceHeap creates a new heap of cheapest accounts in the blob pool to evict
// from in case of over saturation.
func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap {
heap := &evictHeap{
diff --git ethereum/go-ethereum/core/txpool/blobpool/limbo.go taikoxyz/taiko-geth/core/txpool/blobpool/limbo.go
index 4cb5042c2bb5a416ea25b667b5b4520cf757b0b8..d1fe9c739477bce660141673d86dde1e896ddecc 100644
--- ethereum/go-ethereum/core/txpool/blobpool/limbo.go
+++ taikoxyz/taiko-geth/core/txpool/blobpool/limbo.go
@@ -21,7 +21,6 @@ "errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/billy"
@@ -31,12 +30,9 @@ // limboBlob is a wrapper around an opaque blobset that also contains the tx hash
// to which it belongs as well as the block number in which it was included for
// finality eviction.
type limboBlob struct {
- Owner common.Hash // Owner transaction's hash to support resurrecting reorged txs
- Block uint64 // Block in which the blob transaction was included
-
- Blobs []kzg4844.Blob // The opaque blobs originally part of the transaction
- Commits []kzg4844.Commitment // The commitments for the original blobs
- Proofs []kzg4844.Proof // The proofs verifying the commitments
+ TxHash common.Hash // Owner transaction's hash to support resurrecting reorged txs
+ Block uint64 // Block in which the blob transaction was included
+ Tx *types.Transaction
}
// limbo is a light, indexed database to temporarily store recently included
@@ -98,19 +94,19 @@ // by ignoring this data entry.
log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
return err
}
- if _, ok := l.index[item.Owner]; ok {
+ if _, ok := l.index[item.TxHash]; ok {
// This path is impossible, unless due to a programming error a blob gets
// inserted into the limbo which was already part of if. Recover gracefully
// by ignoring this data entry.
- log.Error("Dropping duplicate blob limbo entry", "owner", item.Owner, "id", id)
+ log.Error("Dropping duplicate blob limbo entry", "owner", item.TxHash, "id", id)
return errors.New("duplicate blob")
}
- l.index[item.Owner] = id
+ l.index[item.TxHash] = id
if _, ok := l.groups[item.Block]; !ok {
l.groups[item.Block] = make(map[uint64]common.Hash)
}
- l.groups[item.Block][id] = item.Owner
+ l.groups[item.Block][id] = item.TxHash
return nil
}
@@ -139,15 +135,15 @@ }
// push stores a new blob transaction into the limbo, waiting until finality for
// it to be automatically evicted.
-func (l *limbo) push(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+func (l *limbo) push(tx *types.Transaction, block uint64) error {
// If the blobs are already tracked by the limbo, consider it a programming
// error. There's not much to do against it, but be loud.
- if _, ok := l.index[tx]; ok {
+ if _, ok := l.index[tx.Hash()]; ok {
log.Error("Limbo cannot push already tracked blobs", "tx", tx)
return errors.New("already tracked blob transaction")
}
- if err := l.setAndIndex(tx, block, blobs, commits, proofs); err != nil {
- log.Error("Failed to set and index liboed blobs", "tx", tx, "err", err)
+ if err := l.setAndIndex(tx, block); err != nil {
+ log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
return err
}
return nil
@@ -156,21 +152,21 @@
// pull retrieves a previously pushed set of blobs back from the limbo, removing
// it at the same time. This method should be used when a previously included blob
// transaction gets reorged out.
-func (l *limbo) pull(tx common.Hash) ([]kzg4844.Blob, []kzg4844.Commitment, []kzg4844.Proof, error) {
+func (l *limbo) pull(tx common.Hash) (*types.Transaction, error) {
// If the blobs are not tracked by the limbo, there's not much to do. This
// can happen for example if a blob transaction is mined without pushing it
// into the network first.
id, ok := l.index[tx]
if !ok {
log.Trace("Limbo cannot pull non-tracked blobs", "tx", tx)
- return nil, nil, nil, errors.New("unseen blob transaction")
+ return nil, errors.New("unseen blob transaction")
}
item, err := l.getAndDrop(id)
if err != nil {
log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
- return nil, nil, nil, err
+ return nil, err
}
- return item.Blobs, item.Commits, item.Proofs, nil
+ return item.Tx, nil
}
// update changes the block number under which a blob transaction is tracked. This
@@ -180,33 +176,33 @@ // The method may log errors for various unexpcted scenarios but will not return
// any of it since there's no clear error case. Some errors may be due to coding
// issues, others caused by signers mining MEV stuff or swapping transactions. In
// all cases, the pool needs to continue operating.
-func (l *limbo) update(tx common.Hash, block uint64) {
+func (l *limbo) update(txhash common.Hash, block uint64) {
// If the blobs are not tracked by the limbo, there's not much to do. This
// can happen for example if a blob transaction is mined without pushing it
// into the network first.
- id, ok := l.index[tx]
+ id, ok := l.index[txhash]
if !ok {
- log.Trace("Limbo cannot update non-tracked blobs", "tx", tx)
+ log.Trace("Limbo cannot update non-tracked blobs", "tx", txhash)
return
}
// If there was no change in the blob's inclusion block, don't mess around
// with heavy database operations.
if _, ok := l.groups[block][id]; ok {
- log.Trace("Blob transaction unchanged in limbo", "tx", tx, "block", block)
+ log.Trace("Blob transaction unchanged in limbo", "tx", txhash, "block", block)
return
}
- // Retrieve the old blobs from the data store and write tehm back with a new
+ // Retrieve the old blobs from the data store and write them back with a new
// block number. IF anything fails, there's not much to do, go on.
item, err := l.getAndDrop(id)
if err != nil {
- log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
+ log.Error("Failed to get and drop limboed blobs", "tx", txhash, "id", id, "err", err)
return
}
- if err := l.setAndIndex(tx, block, item.Blobs, item.Commits, item.Proofs); err != nil {
- log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
+ if err := l.setAndIndex(item.Tx, block); err != nil {
+ log.Error("Failed to set and index limboed blobs", "tx", txhash, "err", err)
return
}
- log.Trace("Blob transaction updated in limbo", "tx", tx, "old-block", item.Block, "new-block", block)
+ log.Trace("Blob transaction updated in limbo", "tx", txhash, "old-block", item.Block, "new-block", block)
}
// getAndDrop retrieves a blob item from the limbo store and deletes it both from
@@ -220,7 +216,7 @@ item := new(limboBlob)
if err = rlp.DecodeBytes(data, item); err != nil {
return nil, err
}
- delete(l.index, item.Owner)
+ delete(l.index, item.TxHash)
delete(l.groups[item.Block], id)
if len(l.groups[item.Block]) == 0 {
delete(l.groups, item.Block)
@@ -233,13 +229,12 @@ }
// setAndIndex assembles a limbo blob database entry and stores it, also updating
// the in-memory indices.
-func (l *limbo) setAndIndex(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
+func (l *limbo) setAndIndex(tx *types.Transaction, block uint64) error {
+ txhash := tx.Hash()
item := &limboBlob{
- Owner: tx,
- Block: block,
- Blobs: blobs,
- Commits: commits,
- Proofs: proofs,
+ TxHash: txhash,
+ Block: block,
+ Tx: tx,
}
data, err := rlp.EncodeToBytes(item)
if err != nil {
@@ -249,10 +244,10 @@ id, err := l.store.Put(data)
if err != nil {
return err
}
- l.index[tx] = id
+ l.index[txhash] = id
if _, ok := l.groups[block]; !ok {
l.groups[block] = make(map[uint64]common.Hash)
}
- l.groups[block][id] = tx
+ l.groups[block][id] = txhash
return nil
}
diff --git ethereum/go-ethereum/core/txpool/blobpool/priority.go taikoxyz/taiko-geth/core/txpool/blobpool/priority.go
index 18e545c2a876156b186089cafc8bab61cf82fa76..a8332bd9b0e6680eb38465b4bec73fe6b4d02303 100644
--- ethereum/go-ethereum/core/txpool/blobpool/priority.go
+++ taikoxyz/taiko-geth/core/txpool/blobpool/priority.go
@@ -27,7 +27,7 @@ // log2_1_125 is used in the eviction priority calculation.
var log2_1_125 = math.Log2(1.125)
// evictionPriority calculates the eviction priority based on the algorithm
-// described in the BlobPool docs for a both fee components.
+// described in the BlobPool docs for both fee components.
//
// This method takes about 8ns on a very recent laptop CPU, recalculating about
// 125 million transaction priority values per second.
diff --git ethereum/go-ethereum/core/txpool/blobpool/slotter_test.go taikoxyz/taiko-geth/core/txpool/blobpool/slotter_test.go
index 2751a1872541f4b72782d7421447c68a2548aac6..a7b43b4d22245331d8c4c1fe6f574177dcf5249a 100644
--- ethereum/go-ethereum/core/txpool/blobpool/slotter_test.go
+++ taikoxyz/taiko-geth/core/txpool/blobpool/slotter_test.go
@@ -38,14 +38,16 @@ 1*blobSize + txAvgSize, // 1 blob + some expected tx infos
2*blobSize + txAvgSize, // 2 blob + some expected tx infos (could be fewer blobs and more tx data)
3*blobSize + txAvgSize, // 3 blob + some expected tx infos (could be fewer blobs and more tx data)
4*blobSize + txAvgSize, // 4 blob + some expected tx infos (could be fewer blobs and more tx data)
- 5*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
- 6*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
- 7*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
- 8*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
- 9*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
- 10*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
- 11*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
- 12*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos >= 4 blobs + max tx metadata size
+ 5*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 6 blobs + max tx metadata size
+ 6*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 6 blobs + max tx metadata size
+ 7*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 6 blobs + max tx metadata size
+ 8*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 6 blobs + max tx metadata size
+ 9*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 6 blobs + max tx metadata size
+ 10*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 6 blobs + max tx metadata size
+ 11*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 6 blobs + max tx metadata size
+ 12*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 6 blobs + max tx metadata size
+ 13*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 6 blobs + max tx metadata size
+ 14*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos >= 6 blobs + max tx metadata size
}
if len(shelves) != len(want) {
t.Errorf("shelves count mismatch: have %d, want %d", len(shelves), len(want))
diff --git ethereum/go-ethereum/core/txpool/legacypool/legacypool.go taikoxyz/taiko-geth/core/txpool/legacypool/legacypool.go
index b1ae8bac81408683f18bc0c02169f8ba45048a51..e71204185f0306ec5d2e6ef4b79fe5ca69319809 100644
--- ethereum/go-ethereum/core/txpool/legacypool/legacypool.go
+++ taikoxyz/taiko-geth/core/txpool/legacypool/legacypool.go
@@ -208,7 +208,6 @@ chainconfig *params.ChainConfig
chain BlockChain
gasTip atomic.Pointer[big.Int]
txFeed event.Feed
- scope event.SubscriptionScope
signer types.Signer
mu sync.RWMutex
@@ -298,7 +297,20 @@ pool.reserve = reserve
// Set the basic pool parameters
pool.gasTip.Store(gasTip)
- pool.reset(nil, head)
+
+ // Initialize the state with head block, or fallback to empty one in
+ // case the head state is not available(might occur when node is not
+ // fully synced).
+ statedb, err := pool.chain.StateAt(head.Root)
+ if err != nil {
+ statedb, err = pool.chain.StateAt(types.EmptyRootHash)
+ }
+ if err != nil {
+ return err
+ }
+ pool.currentHead.Store(head)
+ pool.currentState = statedb
+ pool.pendingNonces = newNoncer(statedb)
// Start the reorg loop early, so it can handle requests generated during
// journal loading.
@@ -391,9 +403,6 @@ }
// Close terminates the transaction pool.
func (pool *LegacyPool) Close() error {
- // Unsubscribe all subscriptions registered from txpool
- pool.scope.Close()
-
// Terminate the pool reorger and return
close(pool.reorgShutdownCh)
pool.wg.Wait()
@@ -406,16 +415,20 @@ return nil
}
// Reset implements txpool.SubPool, allowing the legacy pool's internal state to be
-// kept in sync with the main transacion pool's internal state.
+// kept in sync with the main transaction pool's internal state.
func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
wait := pool.requestReset(oldHead, newHead)
<-wait
}
-// SubscribeTransactions registers a subscription of NewTxsEvent and
-// starts sending event to the given channel.
-func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
- return pool.scope.Track(pool.txFeed.Subscribe(ch))
+// SubscribeTransactions registers a subscription for new transaction events,
+// supporting feeding only newly seen or also resurrected transactions.
+func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
+ // The legacy pool has a very messed up internal shuffling, so it's kind of
+ // hard to separate newly discovered transactions from resurrected ones. This
+ // is because the new txs are added to the queue, resurrected ones too and
+ // reorgs run lazily, so separating the two would need a marker.
+ return pool.txFeed.Subscribe(ch)
}
// SetGasTip updates the minimum gas tip required by the transaction pool for a
@@ -535,10 +548,12 @@ for i := 0; i < len(txs); i++ {
lazies[i] = &txpool.LazyTransaction{
Pool: pool,
Hash: txs[i].Hash(),
- Tx: &txpool.Transaction{Tx: txs[i]},
+ Tx: txs[i],
Time: txs[i].Time(),
GasFeeCap: txs[i].GasFeeCap(),
GasTipCap: txs[i].GasTipCap(),
+ Gas: txs[i].Gas(),
+ BlobGas: txs[i].BlobGas(),
}
}
pending[addr] = lazies
@@ -588,7 +603,7 @@ }
if local {
opts.MinTip = new(big.Int)
}
- if err := txpool.ValidateTransaction(tx, nil, nil, nil, pool.currentHead.Load(), pool.signer, opts); err != nil {
+ if err := txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts); err != nil {
return err
}
return nil
@@ -637,7 +652,7 @@ // pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
-// be added to the allowlist, preventing any associated transaction from being dropped
+// added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints.
func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
// If the transaction is already known, discard it
@@ -900,26 +915,13 @@ pool.beats[addr] = time.Now()
return true
}
-// Add enqueues a batch of transactions into the pool if they are valid. Depending
-// on the local flag, full pricing contraints will or will not be applied.
-//
-// If sync is set, the method will block until all internal maintenance related
-// to the add is finished. Only use this during tests for determinism!
-func (pool *LegacyPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
- unwrapped := make([]*types.Transaction, len(txs))
- for i, tx := range txs {
- unwrapped[i] = tx.Tx
- }
- return pool.addTxs(unwrapped, local, sync)
-}
-
// addLocals enqueues a batch of transactions into the pool if they are valid, marking the
-// senders as a local ones, ensuring they go around the local pricing constraints.
+// senders as local ones, ensuring they go around the local pricing constraints.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
- return pool.addTxs(txs, !pool.config.NoLocals, true)
+ return pool.Add(txs, !pool.config.NoLocals, true)
}
// addLocal enqueues a single local transaction into the pool if it is valid. This is
@@ -935,7 +937,7 @@ //
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
- return pool.addTxs(txs, false, false)
+ return pool.Add(txs, false, false)
}
// addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
@@ -947,16 +949,20 @@ }
// addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
- return pool.addTxs(txs, false, true)
+ return pool.Add(txs, false, true)
}
// This is like addRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
- return pool.addTxs([]*types.Transaction{tx}, false, true)[0]
+ return pool.Add([]*types.Transaction{tx}, false, true)[0]
}
-// addTxs attempts to queue a batch of transactions if they are valid.
-func (pool *LegacyPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
+// Add enqueues a batch of transactions into the pool if they are valid. Depending
+// on the local flag, full pricing constraints will or will not be applied.
+//
+// If sync is set, the method will block until all internal maintenance related
+// to the add is finished. Only use this during tests for determinism!
+func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error {
// Filter out known ones without obtaining the pool lock or recovering signatures
var (
errs = make([]error, len(txs))
@@ -1042,12 +1048,12 @@ return txpool.TxStatusUnknown
}
// Get returns a transaction if it is contained in the pool and nil otherwise.
-func (pool *LegacyPool) Get(hash common.Hash) *txpool.Transaction {
+func (pool *LegacyPool) Get(hash common.Hash) *types.Transaction {
tx := pool.get(hash)
if tx == nil {
return nil
}
- return &txpool.Transaction{Tx: tx}
+ return tx
}
// get returns a transaction if it is contained in the pool and nil otherwise.
diff --git ethereum/go-ethereum/core/txpool/legacypool/list.go taikoxyz/taiko-geth/core/txpool/legacypool/list.go
index d5d24c85a5019f0ddce8ce8ea5fa1eda6e57532f..05ae0b58cd59766145c3dc4c80045878368f8e28 100644
--- ethereum/go-ethereum/core/txpool/legacypool/list.go
+++ taikoxyz/taiko-geth/core/txpool/legacypool/list.go
@@ -53,9 +53,10 @@
// sortedMap is a nonce->transaction hash map with a heap based index to allow
// iterating over the contents in a nonce-incrementing way.
type sortedMap struct {
- items map[uint64]*types.Transaction // Hash map storing the transaction data
- index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode)
- cache types.Transactions // Cache of the transactions already sorted
+ items map[uint64]*types.Transaction // Hash map storing the transaction data
+ index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode)
+ cache types.Transactions // Cache of the transactions already sorted
+ cacheMu sync.Mutex // Mutex covering the cache
}
// newSortedMap creates a new nonce-sorted transaction map.
@@ -78,7 +79,9 @@ nonce := tx.Nonce()
if m.items[nonce] == nil {
heap.Push(m.index, nonce)
}
+ m.cacheMu.Lock()
m.items[nonce], m.cache = tx, nil
+ m.cacheMu.Unlock()
}
// Forward removes all transactions from the map with a nonce lower than the
@@ -94,9 +97,11 @@ removed = append(removed, m.items[nonce])
delete(m.items, nonce)
}
// If we had a cached order, shift the front
+ m.cacheMu.Lock()
if m.cache != nil {
m.cache = m.cache[len(removed):]
}
+ m.cacheMu.Unlock()
return removed
}
@@ -120,7 +125,9 @@ for nonce := range m.items {
*m.index = append(*m.index, nonce)
}
heap.Init(m.index)
+ m.cacheMu.Lock()
m.cache = nil
+ m.cacheMu.Unlock()
}
// filter is identical to Filter, but **does not** regenerate the heap. This method
@@ -136,7 +143,9 @@ delete(m.items, nonce)
}
}
if len(removed) > 0 {
+ m.cacheMu.Lock()
m.cache = nil
+ m.cacheMu.Unlock()
}
return removed
}
@@ -160,9 +169,11 @@ *m.index = (*m.index)[:threshold]
heap.Init(m.index)
// If we had a cache, shift the back
+ m.cacheMu.Lock()
if m.cache != nil {
m.cache = m.cache[:len(m.cache)-len(drops)]
}
+ m.cacheMu.Unlock()
return drops
}
@@ -182,7 +193,9 @@ break
}
}
delete(m.items, nonce)
+ m.cacheMu.Lock()
m.cache = nil
+ m.cacheMu.Unlock()
return true
}
@@ -192,7 +205,7 @@ // provided nonce that is ready for processing. The returned transactions will be
// removed from the list.
//
// Note, all transactions with nonces lower than start will also be returned to
-// prevent getting into and invalid state. This is not something that should ever
+// prevent getting into an invalid state. This is not something that should ever
// happen but better to be self correcting than failing!
func (m *sortedMap) Ready(start uint64) types.Transactions {
// Short circuit if no transactions are available
@@ -206,7 +219,9 @@ ready = append(ready, m.items[next])
delete(m.items, next)
heap.Pop(m.index)
}
+ m.cacheMu.Lock()
m.cache = nil
+ m.cacheMu.Unlock()
return ready
}
@@ -217,6 +232,8 @@ return len(m.items)
}
func (m *sortedMap) flatten() types.Transactions {
+ m.cacheMu.Lock()
+ defer m.cacheMu.Unlock()
// If the sorting was not cached yet, create and cache it
if m.cache == nil {
m.cache = make(types.Transactions, 0, len(m.items))
@@ -232,8 +249,8 @@ // Flatten creates a nonce-sorted slice of transactions based on the loosely
// sorted internal representation. The result of the sorting is cached in case
// it's requested again before any modifications are made to the contents.
func (m *sortedMap) Flatten() types.Transactions {
- // Copy the cache to prevent accidental modifications
cache := m.flatten()
+ // Copy the cache to prevent accidental modification
txs := make(types.Transactions, len(cache))
copy(txs, cache)
return txs
@@ -404,7 +421,7 @@ // provided nonce that is ready for processing. The returned transactions will be
// removed from the list.
//
// Note, all transactions with nonces lower than start will also be returned to
-// prevent getting into and invalid state. This is not something that should ever
+// prevent getting into an invalid state. This is not something that should ever
// happen but better to be self correcting than failing!
func (l *list) Ready(start uint64) types.Transactions {
txs := l.txs.Ready(start)
diff --git ethereum/go-ethereum/core/vm/runtime/env.go taikoxyz/taiko-geth/core/vm/runtime/env.go
index ffc631a90c95aae39999b570b8b5252f14417820..64aa550a25032526287f6f8c1ca63989eae5ba61 100644
--- ethereum/go-ethereum/core/vm/runtime/env.go
+++ taikoxyz/taiko-geth/core/vm/runtime/env.go
@@ -37,6 +37,8 @@ Time: cfg.Time,
Difficulty: cfg.Difficulty,
GasLimit: cfg.GasLimit,
BaseFee: cfg.BaseFee,
+ BlobBaseFee: cfg.BlobBaseFee,
+ Random: cfg.Random,
}
return vm.NewEVM(blockContext, txContext, cfg.State, cfg.ChainConfig, cfg.EVMConfig)
diff --git ethereum/go-ethereum/core/vm/runtime/runtime.go taikoxyz/taiko-geth/core/vm/runtime/runtime.go
index a3e75c67212c379a10694f61faf3f9f8fe189c9b..cfd7e4dbc4ee3b8b3a47436c2bfa188f1abc9174 100644
--- ethereum/go-ethereum/core/vm/runtime/runtime.go
+++ taikoxyz/taiko-geth/core/vm/runtime/runtime.go
@@ -44,7 +44,9 @@ Value *big.Int
Debug bool
EVMConfig vm.Config
BaseFee *big.Int
+ BlobBaseFee *big.Int
BlobHashes []common.Hash
+ Random *common.Hash
State *state.StateDB
GetHashFn func(n uint64) common.Hash
@@ -93,6 +95,9 @@ }
}
if cfg.BaseFee == nil {
cfg.BaseFee = big.NewInt(params.InitialBaseFee)
+ }
+ if cfg.BlobBaseFee == nil {
+ cfg.BlobBaseFee = new(big.Int)
}
}
diff --git ethereum/go-ethereum/crypto/bls12381/g2.go taikoxyz/taiko-geth/crypto/bls12381/g2.go
index 4d6f1ff11de82c6dbd961866d298d91fcca6659e..e5fe75af20c726b5d2b942590ef0108ab4457b32 100644
--- ethereum/go-ethereum/crypto/bls12381/g2.go
+++ taikoxyz/taiko-geth/crypto/bls12381/g2.go
@@ -121,7 +121,7 @@ }
return p, nil
}
-// DecodePoint given encoded (x, y) coordinates in 256 bytes returns a valid G1 Point.
+// DecodePoint given encoded (x, y) coordinates in 256 bytes returns a valid G2 Point.
func (g *G2) DecodePoint(in []byte) (*PointG2, error) {
if len(in) != 256 {
return nil, errors.New("invalid g2 point length")
diff --git ethereum/go-ethereum/crypto/bn256/cloudflare/optate.go taikoxyz/taiko-geth/crypto/bn256/cloudflare/optate.go
index b71e50e3a21ceff53c98027c54c592b977b4ee06..e8caa7a08656953ebd6306fa78f3efc1f3351b37 100644
--- ethereum/go-ethereum/crypto/bn256/cloudflare/optate.go
+++ taikoxyz/taiko-geth/crypto/bn256/cloudflare/optate.go
@@ -199,9 +199,8 @@ mulLine(ret, a, b, c)
r = newR
r2.Square(&minusQ2.y)
- a, b, c, newR = lineFunctionAdd(r, minusQ2, bAffine, r2)
+ a, b, c, _ = lineFunctionAdd(r, minusQ2, bAffine, r2)
mulLine(ret, a, b, c)
- r = newR
return ret
}
diff --git ethereum/go-ethereum/eth/protocols/eth/broadcast.go taikoxyz/taiko-geth/eth/protocols/eth/broadcast.go
index c431aa4005a674b09dad20dfb2d066d64d43b0ba..3045303f222e126ce8362bfa5b92ad48e6b289b3 100644
--- ethereum/go-ethereum/eth/protocols/eth/broadcast.go
+++ taikoxyz/taiko-geth/eth/protocols/eth/broadcast.go
@@ -81,8 +81,8 @@ size common.StorageSize
)
for i := 0; i < len(queue) && size < maxTxPacketSize; i++ {
if tx := p.txpool.Get(queue[i]); tx != nil {
- txs = append(txs, tx.Tx)
- size += common.StorageSize(tx.Tx.Size())
+ txs = append(txs, tx)
+ size += common.StorageSize(tx.Size())
}
hashesCount++
}
@@ -151,8 +151,8 @@ )
for count = 0; count < len(queue) && size < maxTxPacketSize; count++ {
if tx := p.txpool.Get(queue[count]); tx != nil {
pending = append(pending, queue[count])
- pendingTypes = append(pendingTypes, tx.Tx.Type())
- pendingSizes = append(pendingSizes, uint32(tx.Tx.Size()))
+ pendingTypes = append(pendingTypes, tx.Type())
+ pendingSizes = append(pendingSizes, uint32(tx.Size()))
size += common.HashLength
}
}
diff --git ethereum/go-ethereum/eth/protocols/eth/discovery.go taikoxyz/taiko-geth/eth/protocols/eth/discovery.go
index 87857244b58988c2afca79a38ddb3c5130cde8e2..a7bdd47daf07df237acf996a002bb1d956ea2eb8 100644
--- ethereum/go-ethereum/eth/protocols/eth/discovery.go
+++ taikoxyz/taiko-geth/eth/protocols/eth/discovery.go
@@ -61,6 +61,6 @@ // currentENREntry constructs an `eth` ENR entry based on the current state of the chain.
func currentENREntry(chain *core.BlockChain) *enrEntry {
head := chain.CurrentHeader()
return &enrEntry{
- ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), head.Number.Uint64(), head.Time),
+ ForkID: forkid.NewID(chain.Config(), chain.Genesis(), head.Number.Uint64(), head.Time),
}
}
diff --git ethereum/go-ethereum/eth/protocols/eth/handler.go taikoxyz/taiko-geth/eth/protocols/eth/handler.go
index 7f51d4f5cdd3abe9f79ed5f6374db0a1887cfd60..42d0412a127c8b62ba0a5693cf98dde6652122ed 100644
--- ethereum/go-ethereum/eth/protocols/eth/handler.go
+++ taikoxyz/taiko-geth/eth/protocols/eth/handler.go
@@ -23,7 +23,7 @@ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -43,10 +43,6 @@ // maxBodiesServe is the maximum number of block bodies to serve. This number
// is mostly there to limit the number of disk lookups. With 24KB block sizes
// nowadays, the practical limit will always be softResponseLimit.
maxBodiesServe = 1024
-
- // maxNodeDataServe is the maximum number of state trie nodes to serve. This
- // number is there to limit the number of disk lookups.
- maxNodeDataServe = 1024
// maxReceiptsServe is the maximum number of block receipts to serve. This
// number is mostly there to limit the number of disk lookups. With block
@@ -90,16 +86,20 @@
// TxPool defines the methods needed by the protocol handler to serve transactions.
type TxPool interface {
// Get retrieves the transaction from the local txpool with the given hash.
- Get(hash common.Hash) *txpool.Transaction
+ Get(hash common.Hash) *types.Transaction
}
// MakeProtocols constructs the P2P protocol definitions for `eth`.
func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
- protocols := make([]p2p.Protocol, len(ProtocolVersions))
- for i, version := range ProtocolVersions {
+ protocols := make([]p2p.Protocol, 0, len(ProtocolVersions))
+ for _, version := range ProtocolVersions {
+ // Blob transactions require eth/68 announcements, disable everything else
+ if version <= ETH67 && backend.Chain().Config().CancunTime != nil {
+ continue
+ }
version := version // Closure
- protocols[i] = p2p.Protocol{
+ protocols = append(protocols, p2p.Protocol{
Name: ProtocolName,
Version: version,
Length: protocolLengths[version],
@@ -119,7 +119,7 @@ return backend.PeerInfo(id)
},
Attributes: []enr.Entry{currentENREntry(backend.Chain())},
DialCandidates: dnsdisc,
- }
+ })
}
return protocols
}
@@ -166,36 +166,19 @@ Decode(val interface{}) error
Time() time.Time
}
-var eth66 = map[uint64]msgHandler{
- NewBlockHashesMsg: handleNewBlockhashes,
- NewBlockMsg: handleNewBlock,
- TransactionsMsg: handleTransactions,
- NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66,
- GetBlockHeadersMsg: handleGetBlockHeaders66,
- BlockHeadersMsg: handleBlockHeaders66,
- GetBlockBodiesMsg: handleGetBlockBodies66,
- BlockBodiesMsg: handleBlockBodies66,
- GetNodeDataMsg: handleGetNodeData66,
- NodeDataMsg: handleNodeData66,
- GetReceiptsMsg: handleGetReceipts66,
- ReceiptsMsg: handleReceipts66,
- GetPooledTransactionsMsg: handleGetPooledTransactions66,
- PooledTransactionsMsg: handlePooledTransactions66,
-}
-
var eth67 = map[uint64]msgHandler{
NewBlockHashesMsg: handleNewBlockhashes,
NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions,
- NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66,
- GetBlockHeadersMsg: handleGetBlockHeaders66,
- BlockHeadersMsg: handleBlockHeaders66,
- GetBlockBodiesMsg: handleGetBlockBodies66,
- BlockBodiesMsg: handleBlockBodies66,
- GetReceiptsMsg: handleGetReceipts66,
- ReceiptsMsg: handleReceipts66,
- GetPooledTransactionsMsg: handleGetPooledTransactions66,
- PooledTransactionsMsg: handlePooledTransactions66,
+ NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67,
+ GetBlockHeadersMsg: handleGetBlockHeaders,
+ BlockHeadersMsg: handleBlockHeaders,
+ GetBlockBodiesMsg: handleGetBlockBodies,
+ BlockBodiesMsg: handleBlockBodies,
+ GetReceiptsMsg: handleGetReceipts,
+ ReceiptsMsg: handleReceipts,
+ GetPooledTransactionsMsg: handleGetPooledTransactions,
+ PooledTransactionsMsg: handlePooledTransactions,
}
var eth68 = map[uint64]msgHandler{
@@ -203,14 +186,14 @@ NewBlockHashesMsg: handleNewBlockhashes,
NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions,
NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68,
- GetBlockHeadersMsg: handleGetBlockHeaders66,
- BlockHeadersMsg: handleBlockHeaders66,
- GetBlockBodiesMsg: handleGetBlockBodies66,
- BlockBodiesMsg: handleBlockBodies66,
- GetReceiptsMsg: handleGetReceipts66,
- ReceiptsMsg: handleReceipts66,
- GetPooledTransactionsMsg: handleGetPooledTransactions66,
- PooledTransactionsMsg: handlePooledTransactions66,
+ GetBlockHeadersMsg: handleGetBlockHeaders,
+ BlockHeadersMsg: handleBlockHeaders,
+ GetBlockBodiesMsg: handleGetBlockBodies,
+ BlockBodiesMsg: handleBlockBodies,
+ GetReceiptsMsg: handleGetReceipts,
+ ReceiptsMsg: handleReceipts,
+ GetPooledTransactionsMsg: handleGetPooledTransactions,
+ PooledTransactionsMsg: handlePooledTransactions,
}
// handleMessage is invoked whenever an inbound message is received from a remote
@@ -226,14 +209,10 @@ return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
}
defer msg.Discard()
- var handlers = eth66
- if peer.Version() == ETH67 {
- handlers = eth67
- }
+ var handlers = eth67
if peer.Version() >= ETH68 {
handlers = eth68
}
-
// Track the amount of time it takes to serve the request and run the handler
if metrics.Enabled {
h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
diff --git ethereum/go-ethereum/eth/protocols/eth/handler_test.go taikoxyz/taiko-geth/eth/protocols/eth/handler_test.go
index 3f1dc9fe76e6e8449d273f95ffe0c3bb385deea3..41e18bfb3e01b7f32d7b29cca720fda9de26a4cb 100644
--- ethereum/go-ethereum/eth/protocols/eth/handler_test.go
+++ taikoxyz/taiko-geth/eth/protocols/eth/handler_test.go
@@ -28,7 +28,6 @@ "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
@@ -112,7 +111,7 @@ if _, err := chain.InsertChain(bs); err != nil {
panic(err)
}
for _, block := range bs {
- chain.StateCache().TrieDB().Commit(block.Root(), false)
+ chain.TrieDB().Commit(block.Root(), false)
}
txconfig := legacypool.DefaultConfig
txconfig.Journal = "" // Don't litter the disk with test journals
@@ -151,7 +150,6 @@ panic("data processing tests should be done in the handler package")
}
// Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) }
func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) }
func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) }
@@ -178,29 +176,29 @@ }
// Create a batch of tests for various scenarios
limit := uint64(maxHeadersServe)
tests := []struct {
- query *GetBlockHeadersPacket // The query to execute for header retrieval
- expect []common.Hash // The hashes of the block whose headers are expected
+ query *GetBlockHeadersRequest // The query to execute for header retrieval
+ expect []common.Hash // The hashes of the block whose headers are expected
}{
// A single random block should be retrievable by hash
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
},
// A single random block should be retrievable by number
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
},
// Multiple headers should be retrievable in both directions
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
[]common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 1).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 2).Hash(),
},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 - 1).Hash(),
@@ -209,14 +207,14 @@ },
},
// Multiple headers with skip lists should be retrievable
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
[]common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 4).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 8).Hash(),
},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 - 4).Hash(),
@@ -225,31 +223,31 @@ },
},
// The chain endpoints should be retrievable
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(0).Hash()},
},
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
[]common.Hash{backend.chain.CurrentBlock().Hash()},
},
{ // If the peer requests a bit into the future, we deliver what we have
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
[]common.Hash{backend.chain.CurrentBlock().Hash()},
},
// Ensure protocol limits are honored
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit),
},
// Check that requesting more than available is handled gracefully
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
[]common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(),
},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(4).Hash(),
backend.chain.GetBlockByNumber(0).Hash(),
@@ -257,13 +255,13 @@ },
},
// Check that requesting more than available is handled gracefully, even if mid skip
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
[]common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(),
},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(4).Hash(),
backend.chain.GetBlockByNumber(1).Hash(),
@@ -271,7 +269,7 @@ },
},
// Check a corner case where requesting more can iterate past the endpoints
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
[]common.Hash{
backend.chain.GetBlockByNumber(2).Hash(),
backend.chain.GetBlockByNumber(1).Hash(),
@@ -280,24 +278,24 @@ },
},
// Check a corner case where skipping overflow loops back into the chain start
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
[]common.Hash{
backend.chain.GetBlockByNumber(3).Hash(),
},
},
// Check a corner case where skipping overflow loops back to the same header
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
[]common.Hash{
backend.chain.GetBlockByNumber(1).Hash(),
},
},
// Check that non existing headers aren't returned
{
- &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
[]common.Hash{},
}, {
- &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
+ &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
[]common.Hash{},
},
}
@@ -309,13 +307,13 @@ for _, hash := range tt.expect {
headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
}
// Send the hash request and verify the response
- p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
- RequestId: 123,
- GetBlockHeadersPacket: tt.query,
+ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
+ RequestId: 123,
+ GetBlockHeadersRequest: tt.query,
})
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{
- RequestId: 123,
- BlockHeadersPacket: headers,
+ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{
+ RequestId: 123,
+ BlockHeadersRequest: headers,
}); err != nil {
t.Errorf("test %d: headers mismatch: %v", i, err)
}
@@ -324,11 +322,11 @@ if tt.query.Origin.Hash == (common.Hash{}) {
if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0
- p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
- RequestId: 456,
- GetBlockHeadersPacket: tt.query,
+ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
+ RequestId: 456,
+ GetBlockHeadersRequest: tt.query,
})
- expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers}
+ expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers}
if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil {
t.Errorf("test %d by hash: headers mismatch: %v", i, err)
}
@@ -338,7 +336,6 @@ }
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) }
func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) }
func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) }
@@ -420,139 +417,20 @@ }
}
// Send the hash request and verify the response
- p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
- RequestId: 123,
- GetBlockBodiesPacket: hashes,
+ p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{
+ RequestId: 123,
+ GetBlockBodiesRequest: hashes,
})
- if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{
- RequestId: 123,
- BlockBodiesPacket: bodies,
+ if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{
+ RequestId: 123,
+ BlockBodiesResponse: bodies,
}); err != nil {
t.Fatalf("test %d: bodies mismatch: %v", i, err)
}
}
}
-// Tests that the state trie nodes can be retrieved based on hashes.
-func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) }
-func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) }
-func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) }
-
-func testGetNodeData(t *testing.T, protocol uint, drop bool) {
- t.Parallel()
-
- // Define three accounts to simulate transactions with
- acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
- acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
- acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
- acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
-
- signer := types.HomesteadSigner{}
- // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
- generator := func(i int, block *core.BlockGen) {
- switch i {
- case 0:
- // In block 1, the test bank sends account #1 some ether.
- tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
- block.AddTx(tx)
- case 1:
- // In block 2, the test bank sends some more ether to account #1.
- // acc1Addr passes it on to account #2.
- tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
- tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key)
- block.AddTx(tx1)
- block.AddTx(tx2)
- case 2:
- // Block 3 is empty but was mined by account #2.
- block.SetCoinbase(acc2Addr)
- block.SetExtra([]byte("yeehaw"))
- case 3:
- // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
- b2 := block.PrevBlock(1).Header()
- b2.Extra = []byte("foo")
- block.AddUncle(b2)
- b3 := block.PrevBlock(2).Header()
- b3.Extra = []byte("foo")
- block.AddUncle(b3)
- }
- }
- // Assemble the test environment
- backend := newTestBackendWithGenerator(4, false, generator)
- defer backend.close()
-
- peer, _ := newTestPeer("peer", protocol, backend)
- defer peer.close()
-
- // Collect all state tree hashes.
- var hashes []common.Hash
- it := backend.db.NewIterator(nil, nil)
- for it.Next() {
- if key := it.Key(); len(key) == common.HashLength {
- hashes = append(hashes, common.BytesToHash(key))
- }
- }
- it.Release()
-
- // Request all hashes.
- p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{
- RequestId: 123,
- GetNodeDataPacket: hashes,
- })
- msg, err := peer.app.ReadMsg()
- if !drop {
- if err != nil {
- t.Fatalf("failed to read node data response: %v", err)
- }
- } else {
- if err != nil {
- return
- }
- t.Fatalf("succeeded to read node data response on non-supporting protocol: %v", msg)
- }
- if msg.Code != NodeDataMsg {
- t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg)
- }
- var res NodeDataPacket66
- if err := msg.Decode(&res); err != nil {
- t.Fatalf("failed to decode response node data: %v", err)
- }
-
- // Verify that all hashes correspond to the requested data.
- data := res.NodeDataPacket
- for i, want := range hashes {
- if hash := crypto.Keccak256Hash(data[i]); hash != want {
- t.Errorf("data hash mismatch: have %x, want %x", hash, want)
- }
- }
-
- // Reconstruct state tree from the received data.
- reconstructDB := rawdb.NewMemoryDatabase()
- for i := 0; i < len(data); i++ {
- rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i])
- }
-
- // Sanity check whether all state matches.
- accounts := []common.Address{testAddr, acc1Addr, acc2Addr}
- for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ {
- root := backend.chain.GetBlockByNumber(i).Root()
- reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil)
- for j, acc := range accounts {
- state, _ := backend.chain.StateAt(root)
- bw := state.GetBalance(acc)
- bh := reconstructed.GetBalance(acc)
-
- if (bw == nil) != (bh == nil) {
- t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
- }
- if bw != nil && bh != nil && bw.Cmp(bh) != 0 {
- t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
- }
- }
- }
-}
-
// Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) }
func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) }
func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) }
@@ -613,13 +491,13 @@ hashes = append(hashes, block.Hash())
receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
}
// Send the hash request and verify the response
- p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{
- RequestId: 123,
- GetReceiptsPacket: hashes,
+ p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{
+ RequestId: 123,
+ GetReceiptsRequest: hashes,
})
- if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{
- RequestId: 123,
- ReceiptsPacket: receipts,
+ if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{
+ RequestId: 123,
+ ReceiptsResponse: receipts,
}); err != nil {
t.Errorf("receipts mismatch: %v", err)
}
diff --git ethereum/go-ethereum/eth/protocols/eth/handlers.go taikoxyz/taiko-geth/eth/protocols/eth/handlers.go
index f9fbf72b7b1c6ebae6b8cc40df60da5377993a42..069e92dadf90f1c64a26846620b4682271390aec 100644
--- ethereum/go-ethereum/eth/protocols/eth/handlers.go
+++ taikoxyz/taiko-geth/eth/protocols/eth/handlers.go
@@ -28,20 +28,19 @@ "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
-// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders
-func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
// Decode the complex header query
- var query GetBlockHeadersPacket66
+ var query GetBlockHeadersPacket
if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer)
+ response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersRequest, peer)
return peer.ReplyBlockHeadersRLP(query.RequestId, response)
}
// ServiceGetBlockHeadersQuery assembles the response to a header query. It is
// exposed to allow external packages to test protocol behavior.
-func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue {
+func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
if query.Skip == 0 {
// The fast path: when the request is for a contiguous segment of headers.
return serviceContiguousBlockHeaderQuery(chain, query)
@@ -50,7 +49,7 @@ return serviceNonContiguousBlockHeaderQuery(chain, query, peer)
}
}
-func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue {
+func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest, peer *Peer) []rlp.RawValue {
hashMode := query.Origin.Hash != (common.Hash{})
first := true
maxNonCanonical := uint64(100)
@@ -139,7 +138,7 @@ }
return headers
}
-func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue {
+func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue {
count := query.Amount
if count > maxHeadersServe {
count = maxHeadersServe
@@ -202,19 +201,19 @@ return headers
}
}
-func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
// Decode the block body retrieval message
- var query GetBlockBodiesPacket66
+ var query GetBlockBodiesPacket
if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket)
+ response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesRequest)
return peer.ReplyBlockBodiesRLP(query.RequestId, response)
}
// ServiceGetBlockBodiesQuery assembles the response to a body query. It is
// exposed to allow external packages to test protocol behavior.
-func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue {
+func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesRequest) []rlp.RawValue {
// Gather blocks until the fetch or network limits is reached
var (
bytes int
@@ -233,56 +232,19 @@ }
return bodies
}
-func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error {
- // Decode the trie node data retrieval message
- var query GetNodeDataPacket66
- if err := msg.Decode(&query); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket)
- return peer.ReplyNodeData(query.RequestId, response)
-}
-
-// ServiceGetNodeDataQuery assembles the response to a node data query. It is
-// exposed to allow external packages to test protocol behavior.
-func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte {
- // Gather state data until the fetch or network limits is reached
- var (
- bytes int
- nodes [][]byte
- )
- for lookups, hash := range query {
- if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe ||
- lookups >= 2*maxNodeDataServe {
- break
- }
- // Retrieve the requested state entry
- entry, err := chain.TrieNode(hash)
- if len(entry) == 0 || err != nil {
- // Read the contract code with prefix only to save unnecessary lookups.
- entry, err = chain.ContractCodeWithPrefix(hash)
- }
- if err == nil && len(entry) > 0 {
- nodes = append(nodes, entry)
- bytes += len(entry)
- }
- }
- return nodes
-}
-
-func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error {
// Decode the block receipts retrieval message
- var query GetReceiptsPacket66
+ var query GetReceiptsPacket
if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket)
+ response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsRequest)
return peer.ReplyReceiptsRLP(query.RequestId, response)
}
// ServiceGetReceiptsQuery assembles the response to a receipt query. It is
// exposed to allow external packages to test protocol behavior.
-func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue {
+func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsRequest) []rlp.RawValue {
// Gather state data until the fetch or network limits is reached
var (
bytes int
@@ -351,15 +313,15 @@
return backend.Handle(peer, ann)
}
-func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
+func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
// A batch of headers arrived to one of our previous requests
- res := new(BlockHeadersPacket66)
+ res := new(BlockHeadersPacket)
if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
metadata := func() interface{} {
- hashes := make([]common.Hash, len(res.BlockHeadersPacket))
- for i, header := range res.BlockHeadersPacket {
+ hashes := make([]common.Hash, len(res.BlockHeadersRequest))
+ for i, header := range res.BlockHeadersRequest {
hashes[i] = header.Hash()
}
return hashes
@@ -367,24 +329,24 @@ }
return peer.dispatchResponse(&Response{
id: res.RequestId,
code: BlockHeadersMsg,
- Res: &res.BlockHeadersPacket,
+ Res: &res.BlockHeadersRequest,
}, metadata)
}
-func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
+func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
// A batch of block bodies arrived to one of our previous requests
- res := new(BlockBodiesPacket66)
+ res := new(BlockBodiesPacket)
if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
metadata := func() interface{} {
var (
- txsHashes = make([]common.Hash, len(res.BlockBodiesPacket))
- uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket))
- withdrawalHashes = make([]common.Hash, len(res.BlockBodiesPacket))
+ txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
+ uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
+ withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
)
hasher := trie.NewStackTrie(nil)
- for i, body := range res.BlockBodiesPacket {
+ for i, body := range res.BlockBodiesResponse {
txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher)
uncleHashes[i] = types.CalcUncleHash(body.Uncles)
if body.Withdrawals != nil {
@@ -396,33 +358,20 @@ }
return peer.dispatchResponse(&Response{
id: res.RequestId,
code: BlockBodiesMsg,
- Res: &res.BlockBodiesPacket,
+ Res: &res.BlockBodiesResponse,
}, metadata)
}
-func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
- // A batch of node state data arrived to one of our previous requests
- res := new(NodeDataPacket66)
- if err := msg.Decode(res); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- return peer.dispatchResponse(&Response{
- id: res.RequestId,
- code: NodeDataMsg,
- Res: &res.NodeDataPacket,
- }, nil) // No post-processing, we're not using this packet anymore
-}
-
-func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
+func handleReceipts(backend Backend, msg Decoder, peer *Peer) error {
// A batch of receipts arrived to one of our previous requests
- res := new(ReceiptsPacket66)
+ res := new(ReceiptsPacket)
if err := msg.Decode(res); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
metadata := func() interface{} {
hasher := trie.NewStackTrie(nil)
- hashes := make([]common.Hash, len(res.ReceiptsPacket))
- for i, receipt := range res.ReceiptsPacket {
+ hashes := make([]common.Hash, len(res.ReceiptsResponse))
+ for i, receipt := range res.ReceiptsResponse {
hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
}
return hashes
@@ -430,17 +379,17 @@ }
return peer.dispatchResponse(&Response{
id: res.RequestId,
code: ReceiptsMsg,
- Res: &res.ReceiptsPacket,
+ Res: &res.ReceiptsResponse,
}, metadata)
}
-func handleNewPooledTransactionHashes66(backend Backend, msg Decoder, peer *Peer) error {
+func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error {
// New transaction announcement arrived, make sure we have
// a valid and fresh chain to handle them
if !backend.AcceptTxs() {
return nil
}
- ann := new(NewPooledTransactionHashesPacket66)
+ ann := new(NewPooledTransactionHashesPacket67)
if err := msg.Decode(ann); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
@@ -471,17 +420,17 @@ }
return backend.Handle(peer, ann)
}
-func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {
+func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
// Decode the pooled transactions retrieval message
- var query GetPooledTransactionsPacket66
+ var query GetPooledTransactionsPacket
if err := msg.Decode(&query); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer)
+ hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsRequest)
return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs)
}
-func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) {
+func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsRequest) ([]common.Hash, []rlp.RawValue) {
// Gather transactions until the fetch or network limits is reached
var (
bytes int
@@ -498,7 +447,7 @@ if tx == nil {
continue
}
// If known, encode and queue for response packet
- if encoded, err := rlp.EncodeToBytes(tx.Tx); err != nil {
+ if encoded, err := rlp.EncodeToBytes(tx); err != nil {
log.Error("Failed to encode transaction", "err", err)
} else {
hashes = append(hashes, hash)
@@ -529,17 +478,17 @@ }
return backend.Handle(peer, &txs)
}
-func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {
+func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
// Transactions arrived, make sure we have a valid and fresh chain to handle them
if !backend.AcceptTxs() {
return nil
}
// Transactions can be processed, parse all of them and deliver to the pool
- var txs PooledTransactionsPacket66
+ var txs PooledTransactionsPacket
if err := msg.Decode(&txs); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- for i, tx := range txs.PooledTransactionsPacket {
+ for i, tx := range txs.PooledTransactionsResponse {
// Validate and mark the remote transaction
if tx == nil {
return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
@@ -548,5 +497,5 @@ peer.markTransaction(tx.Hash())
}
requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId)
- return backend.Handle(peer, &txs.PooledTransactionsPacket)
+ return backend.Handle(peer, &txs.PooledTransactionsResponse)
}
diff --git ethereum/go-ethereum/eth/protocols/eth/handshake_test.go taikoxyz/taiko-geth/eth/protocols/eth/handshake_test.go
index 5c6727d91cc76ebb1b1d52b5881ae02a70721cf4..d96cfc8165b5075d1a1b16c36a9e574197683067 100644
--- ethereum/go-ethereum/eth/protocols/eth/handshake_test.go
+++ taikoxyz/taiko-geth/eth/protocols/eth/handshake_test.go
@@ -27,7 +27,8 @@ "github.com/ethereum/go-ethereum/p2p/enode"
)
// Tests that handshake failures are detected and reported correctly.
-func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) }
+func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) }
+func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) }
func testHandshake(t *testing.T, protocol uint) {
t.Parallel()
@@ -40,7 +41,7 @@ var (
genesis = backend.chain.Genesis()
head = backend.chain.CurrentBlock()
td = backend.chain.GetTd(head.Hash(), head.Number.Uint64())
- forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time)
+ forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time)
)
tests := []struct {
code uint64
diff --git ethereum/go-ethereum/eth/protocols/eth/peer.go taikoxyz/taiko-geth/eth/protocols/eth/peer.go
index 219f486c8e6f2df2ab321042d48be02dfce090cf..938af0cab0df294ec354883a4daab515c6051de7 100644
--- ethereum/go-ethereum/eth/protocols/eth/peer.go
+++ taikoxyz/taiko-geth/eth/protocols/eth/peer.go
@@ -219,7 +219,7 @@ // not be managed directly.
func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
p.knownTxs.Add(hashes...)
- return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket66(hashes))
+ return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes))
}
// sendPooledTransactionHashes68 sends transaction hashes (tagged with their type
@@ -248,15 +248,15 @@ p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
}
}
-// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP.
+// ReplyPooledTransactionsRLP is the response to RequestTxs.
func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
p.knownTxs.Add(hashes...)
- // Not packed into PooledTransactionsPacket to avoid RLP decoding
- return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{
- RequestId: id,
- PooledTransactionsRLPPacket: txs,
+ // Not packed into PooledTransactionsResponse to avoid RLP decoding
+ return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket{
+ RequestId: id,
+ PooledTransactionsRLPResponse: txs,
})
}
@@ -309,36 +309,28 @@ p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
}
}
-// ReplyBlockHeadersRLP is the eth/66 response to GetBlockHeaders.
+// ReplyBlockHeadersRLP is the response to GetBlockHeaders.
func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {
- return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{
- RequestId: id,
- BlockHeadersRLPPacket: headers,
+ return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket{
+ RequestId: id,
+ BlockHeadersRLPResponse: headers,
})
}
-// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies.
+// ReplyBlockBodiesRLP is the response to GetBlockBodies.
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
- // Not packed into BlockBodiesPacket to avoid RLP decoding
- return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{
- RequestId: id,
- BlockBodiesRLPPacket: bodies,
- })
-}
-
-// ReplyNodeData is the eth/66 response to GetNodeData.
-func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
- return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{
- RequestId: id,
- NodeDataPacket: data,
+ // Not packed into BlockBodiesResponse to avoid RLP decoding
+ return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket{
+ RequestId: id,
+ BlockBodiesRLPResponse: bodies,
})
}
-// ReplyReceiptsRLP is the eth/66 response to GetReceipts.
+// ReplyReceiptsRLP is the response to GetReceipts.
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
- return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{
- RequestId: id,
- ReceiptsRLPPacket: receipts,
+ return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket{
+ RequestId: id,
+ ReceiptsRLPResponse: receipts,
})
}
@@ -353,9 +345,9 @@ id: id,
sink: sink,
code: GetBlockHeadersMsg,
want: BlockHeadersMsg,
- data: &GetBlockHeadersPacket66{
+ data: &GetBlockHeadersPacket{
RequestId: id,
- GetBlockHeadersPacket: &GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &GetBlockHeadersRequest{
Origin: HashOrNumber{Hash: hash},
Amount: uint64(1),
Skip: uint64(0),
@@ -380,9 +372,9 @@ id: id,
sink: sink,
code: GetBlockHeadersMsg,
want: BlockHeadersMsg,
- data: &GetBlockHeadersPacket66{
+ data: &GetBlockHeadersPacket{
RequestId: id,
- GetBlockHeadersPacket: &GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &GetBlockHeadersRequest{
Origin: HashOrNumber{Hash: origin},
Amount: uint64(amount),
Skip: uint64(skip),
@@ -407,9 +399,9 @@ id: id,
sink: sink,
code: GetBlockHeadersMsg,
want: BlockHeadersMsg,
- data: &GetBlockHeadersPacket66{
+ data: &GetBlockHeadersPacket{
RequestId: id,
- GetBlockHeadersPacket: &GetBlockHeadersPacket{
+ GetBlockHeadersRequest: &GetBlockHeadersRequest{
Origin: HashOrNumber{Number: origin},
Amount: uint64(amount),
Skip: uint64(skip),
@@ -434,31 +426,9 @@ id: id,
sink: sink,
code: GetBlockBodiesMsg,
want: BlockBodiesMsg,
- data: &GetBlockBodiesPacket66{
- RequestId: id,
- GetBlockBodiesPacket: hashes,
- },
- }
- if err := p.dispatchRequest(req); err != nil {
- return nil, err
- }
- return req, nil
-}
-
-// RequestNodeData fetches a batch of arbitrary data from a node's known state
-// data, corresponding to the specified hashes.
-func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) {
- p.Log().Debug("Fetching batch of state data", "count", len(hashes))
- id := rand.Uint64()
-
- req := &Request{
- id: id,
- sink: sink,
- code: GetNodeDataMsg,
- want: NodeDataMsg,
- data: &GetNodeDataPacket66{
- RequestId: id,
- GetNodeDataPacket: hashes,
+ data: &GetBlockBodiesPacket{
+ RequestId: id,
+ GetBlockBodiesRequest: hashes,
},
}
if err := p.dispatchRequest(req); err != nil {
@@ -477,9 +447,9 @@ id: id,
sink: sink,
code: GetReceiptsMsg,
want: ReceiptsMsg,
- data: &GetReceiptsPacket66{
- RequestId: id,
- GetReceiptsPacket: hashes,
+ data: &GetReceiptsPacket{
+ RequestId: id,
+ GetReceiptsRequest: hashes,
},
}
if err := p.dispatchRequest(req); err != nil {
@@ -494,9 +464,9 @@ p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
id := rand.Uint64()
requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id)
- return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{
- RequestId: id,
- GetPooledTransactionsPacket: hashes,
+ return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket{
+ RequestId: id,
+ GetPooledTransactionsRequest: hashes,
})
}
diff --git ethereum/go-ethereum/eth/protocols/eth/protocol.go taikoxyz/taiko-geth/eth/protocols/eth/protocol.go
index 4b9f5ad6ba5286963f7bd12a91475c5a940248e4..0f44f83de159cb38c430b9f0a0a37b70a02f33f6 100644
--- ethereum/go-ethereum/eth/protocols/eth/protocol.go
+++ taikoxyz/taiko-geth/eth/protocols/eth/protocol.go
@@ -30,7 +30,6 @@ )
// Constants to match up protocol versions and messages
const (
- ETH66 = 66
ETH67 = 67
ETH68 = 68
)
@@ -41,11 +40,11 @@ const ProtocolName = "eth"
// ProtocolVersions are the supported versions of the `eth` protocol (first
// is primary).
-var ProtocolVersions = []uint{ETH68, ETH67, ETH66}
+var ProtocolVersions = []uint{ETH68, ETH67}
// protocolLengths are the number of implemented message corresponding to
// different protocol versions.
-var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17, ETH66: 17}
+var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17}
// maxMessageSize is the maximum cap on the size of a protocol message.
const maxMessageSize = 10 * 1024 * 1024
@@ -62,8 +61,6 @@ NewBlockMsg = 0x07
NewPooledTransactionHashesMsg = 0x08
GetPooledTransactionsMsg = 0x09
PooledTransactionsMsg = 0x0a
- GetNodeDataMsg = 0x0d
- NodeDataMsg = 0x0e
GetReceiptsMsg = 0x0f
ReceiptsMsg = 0x10
)
@@ -85,7 +82,7 @@ Name() string // Name returns a string corresponding to the message type.
Kind() byte // Kind returns the message type.
}
-// StatusPacket is the network packet for the status message for eth/64 and later.
+// StatusPacket is the network packet for the status message.
type StatusPacket struct {
ProtocolVersion uint32
NetworkID uint64
@@ -118,18 +115,18 @@
// TransactionsPacket is the network packet for broadcasting new transactions.
type TransactionsPacket []*types.Transaction
-// GetBlockHeadersPacket represents a block header query.
-type GetBlockHeadersPacket struct {
+// GetBlockHeadersRequest represents a block header query.
+type GetBlockHeadersRequest struct {
Origin HashOrNumber // Block from which to retrieve headers
Amount uint64 // Maximum number of headers to retrieve
Skip uint64 // Blocks to skip between consecutive headers
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
}
-// GetBlockHeadersPacket66 represents a block header query over eth/66
-type GetBlockHeadersPacket66 struct {
+// GetBlockHeadersPacket represents a block header query with request ID wrapping.
+type GetBlockHeadersPacket struct {
RequestId uint64
- *GetBlockHeadersPacket
+ *GetBlockHeadersRequest
}
// HashOrNumber is a combined field for specifying an origin block.
@@ -168,23 +165,23 @@ return fmt.Errorf("invalid input size %d for origin", size)
}
}
-// BlockHeadersPacket represents a block header response.
-type BlockHeadersPacket []*types.Header
+// BlockHeadersRequest represents a block header response.
+type BlockHeadersRequest []*types.Header
-// BlockHeadersPacket66 represents a block header response over eth/66.
-type BlockHeadersPacket66 struct {
+// BlockHeadersPacket represents a block header response with request ID wrapping.
+type BlockHeadersPacket struct {
RequestId uint64
- BlockHeadersPacket
+ BlockHeadersRequest
}
-// BlockHeadersRLPPacket represents a block header response, to use when we already
+// BlockHeadersRLPResponse represents a block header response, to use when we already
// have the headers rlp encoded.
-type BlockHeadersRLPPacket []rlp.RawValue
+type BlockHeadersRLPResponse []rlp.RawValue
-// BlockHeadersRLPPacket66 represents a block header response over eth/66.
-type BlockHeadersRLPPacket66 struct {
+// BlockHeadersRLPPacket represents a block header response with request ID wrapping.
+type BlockHeadersRLPPacket struct {
RequestId uint64
- BlockHeadersRLPPacket
+ BlockHeadersRLPResponse
}
// NewBlockPacket is the network packet for the block propagation message.
@@ -206,33 +203,34 @@ }
return nil
}
-// GetBlockBodiesPacket represents a block body query.
-type GetBlockBodiesPacket []common.Hash
+// GetBlockBodiesRequest represents a block body query.
+type GetBlockBodiesRequest []common.Hash
-// GetBlockBodiesPacket66 represents a block body query over eth/66.
-type GetBlockBodiesPacket66 struct {
+// GetBlockBodiesPacket represents a block body query with request ID wrapping.
+type GetBlockBodiesPacket struct {
RequestId uint64
- GetBlockBodiesPacket
+ GetBlockBodiesRequest
}
-// BlockBodiesPacket is the network packet for block content distribution.
-type BlockBodiesPacket []*BlockBody
+// BlockBodiesResponse is the network packet for block content distribution.
+type BlockBodiesResponse []*BlockBody
-// BlockBodiesPacket66 is the network packet for block content distribution over eth/66.
-type BlockBodiesPacket66 struct {
+// BlockBodiesPacket is the network packet for block content distribution with
+// request ID wrapping.
+type BlockBodiesPacket struct {
RequestId uint64
- BlockBodiesPacket
+ BlockBodiesResponse
}
-// BlockBodiesRLPPacket is used for replying to block body requests, in cases
+// BlockBodiesRLPResponse is used for replying to block body requests, in cases
// where we already have them RLP-encoded, and thus can avoid the decode-encode
// roundtrip.
-type BlockBodiesRLPPacket []rlp.RawValue
+type BlockBodiesRLPResponse []rlp.RawValue
-// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66
-type BlockBodiesRLPPacket66 struct {
+// BlockBodiesRLPPacket is the BlockBodiesRLPResponse with request ID wrapping.
+type BlockBodiesRLPPacket struct {
RequestId uint64
- BlockBodiesRLPPacket
+ BlockBodiesRLPResponse
}
// BlockBody represents the data content of a single block.
@@ -244,7 +242,7 @@ }
// Unpack retrieves the transactions and uncles from the range packet and returns
// them in a split flat format that's more consistent with the internal data structures.
-func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
+func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
// TODO(matt): add support for withdrawals to fetchers
var (
txset = make([][]*types.Transaction, len(*p))
@@ -257,53 +255,36 @@ }
return txset, uncleset, withdrawalset
}
-// GetNodeDataPacket represents a trie node data query.
-type GetNodeDataPacket []common.Hash
+// GetReceiptsRequest represents a block receipts query.
+type GetReceiptsRequest []common.Hash
-// GetNodeDataPacket66 represents a trie node data query over eth/66.
-type GetNodeDataPacket66 struct {
+// GetReceiptsPacket represents a block receipts query with request ID wrapping.
+type GetReceiptsPacket struct {
RequestId uint64
- GetNodeDataPacket
+ GetReceiptsRequest
}
-// NodeDataPacket is the network packet for trie node data distribution.
-type NodeDataPacket [][]byte
+// ReceiptsResponse is the network packet for block receipts distribution.
+type ReceiptsResponse [][]*types.Receipt
-// NodeDataPacket66 is the network packet for trie node data distribution over eth/66.
-type NodeDataPacket66 struct {
+// ReceiptsPacket is the network packet for block receipts distribution with
+// request ID wrapping.
+type ReceiptsPacket struct {
RequestId uint64
- NodeDataPacket
+ ReceiptsResponse
}
-// GetReceiptsPacket represents a block receipts query.
-type GetReceiptsPacket []common.Hash
+// ReceiptsRLPResponse is used for receipts, when we already have them encoded.
+type ReceiptsRLPResponse []rlp.RawValue
-// GetReceiptsPacket66 represents a block receipts query over eth/66.
-type GetReceiptsPacket66 struct {
+// ReceiptsRLPPacket is ReceiptsRLPResponse with request ID wrapping.
+type ReceiptsRLPPacket struct {
RequestId uint64
- GetReceiptsPacket
+ ReceiptsRLPResponse
}
-// ReceiptsPacket is the network packet for block receipts distribution.
-type ReceiptsPacket [][]*types.Receipt
-
-// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66.
-type ReceiptsPacket66 struct {
- RequestId uint64
- ReceiptsPacket
-}
-
-// ReceiptsRLPPacket is used for receipts, when we already have it encoded
-type ReceiptsRLPPacket []rlp.RawValue
-
-// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket
-type ReceiptsRLPPacket66 struct {
- RequestId uint64
- ReceiptsRLPPacket
-}
-
-// NewPooledTransactionHashesPacket66 represents a transaction announcement packet on eth/66 and eth/67.
-type NewPooledTransactionHashesPacket66 []common.Hash
+// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67.
+type NewPooledTransactionHashesPacket67 []common.Hash
// NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer.
type NewPooledTransactionHashesPacket68 struct {
@@ -312,31 +293,33 @@ Sizes []uint32
Hashes []common.Hash
}
-// GetPooledTransactionsPacket represents a transaction query.
-type GetPooledTransactionsPacket []common.Hash
+// GetPooledTransactionsRequest represents a transaction query.
+type GetPooledTransactionsRequest []common.Hash
-type GetPooledTransactionsPacket66 struct {
+// GetPooledTransactionsPacket represents a transaction query with request ID wrapping.
+type GetPooledTransactionsPacket struct {
RequestId uint64
- GetPooledTransactionsPacket
+ GetPooledTransactionsRequest
}
-// PooledTransactionsPacket is the network packet for transaction distribution.
-type PooledTransactionsPacket []*types.Transaction
+// PooledTransactionsResponse is the network packet for transaction distribution.
+type PooledTransactionsResponse []*types.Transaction
-// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66.
-type PooledTransactionsPacket66 struct {
+// PooledTransactionsPacket is the network packet for transaction distribution
+// with request ID wrapping.
+type PooledTransactionsPacket struct {
RequestId uint64
- PooledTransactionsPacket
+ PooledTransactionsResponse
}
-// PooledTransactionsRLPPacket is the network packet for transaction distribution, used
+// PooledTransactionsRLPResponse is the network packet for transaction distribution, used
// in the cases we already have them in rlp-encoded form
-type PooledTransactionsRLPPacket []rlp.RawValue
+type PooledTransactionsRLPResponse []rlp.RawValue
-// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket
-type PooledTransactionsRLPPacket66 struct {
+// PooledTransactionsRLPPacket is PooledTransactionsRLPResponse with request ID wrapping.
+type PooledTransactionsRLPPacket struct {
RequestId uint64
- PooledTransactionsRLPPacket
+ PooledTransactionsRLPResponse
}
func (*StatusPacket) Name() string { return "Status" }
@@ -348,40 +331,34 @@
func (*TransactionsPacket) Name() string { return "Transactions" }
func (*TransactionsPacket) Kind() byte { return TransactionsMsg }
-func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" }
-func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg }
+func (*GetBlockHeadersRequest) Name() string { return "GetBlockHeaders" }
+func (*GetBlockHeadersRequest) Kind() byte { return GetBlockHeadersMsg }
-func (*BlockHeadersPacket) Name() string { return "BlockHeaders" }
-func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg }
+func (*BlockHeadersRequest) Name() string { return "BlockHeaders" }
+func (*BlockHeadersRequest) Kind() byte { return BlockHeadersMsg }
-func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" }
-func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg }
+func (*GetBlockBodiesRequest) Name() string { return "GetBlockBodies" }
+func (*GetBlockBodiesRequest) Kind() byte { return GetBlockBodiesMsg }
-func (*BlockBodiesPacket) Name() string { return "BlockBodies" }
-func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg }
+func (*BlockBodiesResponse) Name() string { return "BlockBodies" }
+func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg }
func (*NewBlockPacket) Name() string { return "NewBlock" }
func (*NewBlockPacket) Kind() byte { return NewBlockMsg }
-func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" }
-func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg }
+func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" }
+func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg }
func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" }
func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg }
-func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" }
-func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg }
-
-func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" }
-func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg }
-
-func (*GetNodeDataPacket) Name() string { return "GetNodeData" }
-func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg }
+func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" }
+func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg }
-func (*NodeDataPacket) Name() string { return "NodeData" }
-func (*NodeDataPacket) Kind() byte { return NodeDataMsg }
+func (*PooledTransactionsResponse) Name() string { return "PooledTransactions" }
+func (*PooledTransactionsResponse) Kind() byte { return PooledTransactionsMsg }
-func (*GetReceiptsPacket) Name() string { return "GetReceipts" }
-func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg }
+func (*GetReceiptsRequest) Name() string { return "GetReceipts" }
+func (*GetReceiptsRequest) Kind() byte { return GetReceiptsMsg }
-func (*ReceiptsPacket) Name() string { return "Receipts" }
-func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg }
+func (*ReceiptsResponse) Name() string { return "Receipts" }
+func (*ReceiptsResponse) Kind() byte { return ReceiptsMsg }
diff --git ethereum/go-ethereum/eth/protocols/eth/protocol_test.go taikoxyz/taiko-geth/eth/protocols/eth/protocol_test.go
index a86fbb0a6906ae9480e6d94c18c541fd79af8cbd..bc2545dea286a6b8cd633dd5dc54866d7175162f 100644
--- ethereum/go-ethereum/eth/protocols/eth/protocol_test.go
+++ taikoxyz/taiko-geth/eth/protocols/eth/protocol_test.go
@@ -35,19 +35,19 @@ hash[i] = byte(i)
}
// Assemble some table driven tests
tests := []struct {
- packet *GetBlockHeadersPacket
+ packet *GetBlockHeadersRequest
fail bool
}{
// Providing the origin as either a hash or a number should both work
- {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}},
- {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}},
+ {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}}},
+ {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}}},
// Providing arbitrary query field should also work
- {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
- {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
+ {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
+ {fail: false, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},
// Providing both the origin hash and origin number must fail
- {fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}},
+ {fail: true, packet: &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: hash, Number: 314}}},
}
// Iterate over each of the tests and try to encode and then decode
for i, tt := range tests {
@@ -58,7 +58,7 @@ } else if err == nil && tt.fail {
t.Fatalf("test %d: encode should have failed", i)
}
if !tt.fail {
- packet := new(GetBlockHeadersPacket)
+ packet := new(GetBlockHeadersRequest)
if err := rlp.DecodeBytes(bytes, packet); err != nil {
t.Fatalf("test %d: failed to decode packet: %v", i, err)
}
@@ -70,46 +70,40 @@ }
}
}
-// TestEth66EmptyMessages tests encoding of empty eth66 messages
-func TestEth66EmptyMessages(t *testing.T) {
+// TestEmptyMessages tests encoding of empty messages.
+func TestEmptyMessages(t *testing.T) {
// All empty messages encodes to the same format
want := common.FromHex("c4820457c0")
for i, msg := range []interface{}{
// Headers
- GetBlockHeadersPacket66{1111, nil},
- BlockHeadersPacket66{1111, nil},
+ GetBlockHeadersPacket{1111, nil},
+ BlockHeadersPacket{1111, nil},
// Bodies
- GetBlockBodiesPacket66{1111, nil},
- BlockBodiesPacket66{1111, nil},
- BlockBodiesRLPPacket66{1111, nil},
- // Node data
- GetNodeDataPacket66{1111, nil},
- NodeDataPacket66{1111, nil},
+ GetBlockBodiesPacket{1111, nil},
+ BlockBodiesPacket{1111, nil},
+ BlockBodiesRLPPacket{1111, nil},
// Receipts
- GetReceiptsPacket66{1111, nil},
- ReceiptsPacket66{1111, nil},
+ GetReceiptsPacket{1111, nil},
+ ReceiptsPacket{1111, nil},
// Transactions
- GetPooledTransactionsPacket66{1111, nil},
- PooledTransactionsPacket66{1111, nil},
- PooledTransactionsRLPPacket66{1111, nil},
+ GetPooledTransactionsPacket{1111, nil},
+ PooledTransactionsPacket{1111, nil},
+ PooledTransactionsRLPPacket{1111, nil},
// Headers
- BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})},
+ BlockHeadersPacket{1111, BlockHeadersRequest([]*types.Header{})},
// Bodies
- GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})},
- BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})},
- BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})},
- // Node data
- GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})},
- NodeDataPacket66{1111, NodeDataPacket([][]byte{})},
+ GetBlockBodiesPacket{1111, GetBlockBodiesRequest([]common.Hash{})},
+ BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{})},
+ BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{})},
// Receipts
- GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})},
- ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})},
+ GetReceiptsPacket{1111, GetReceiptsRequest([]common.Hash{})},
+ ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{})},
// Transactions
- GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})},
- PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})},
- PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})},
+ GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest([]common.Hash{})},
+ PooledTransactionsPacket{1111, PooledTransactionsResponse([]*types.Transaction{})},
+ PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse([]rlp.RawValue{})},
} {
if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) {
t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want)
@@ -117,8 +111,8 @@ }
}
}
-// TestEth66Messages tests the encoding of all redefined eth66 messages
-func TestEth66Messages(t *testing.T) {
+// TestMessages tests the encoding of all messages.
+func TestMessages(t *testing.T) {
// Some basic structs used during testing
var (
header *types.Header
@@ -169,10 +163,6 @@ hashes = []common.Hash{
common.HexToHash("deadc0de"),
common.HexToHash("feedbeef"),
}
- byteSlices := [][]byte{
- common.FromHex("deadc0de"),
- common.FromHex("feedbeef"),
- }
// init the receipts
{
receipts = []*types.Receipt{
@@ -203,59 +193,51 @@ message interface{}
want []byte
}{
{
- GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}},
+ GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{hashes[0], 0}, 5, 5, false}},
common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"),
},
{
- GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}},
+ GetBlockHeadersPacket{1111, &GetBlockHeadersRequest{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}},
common.FromHex("ca820457c682270f050580"),
},
{
- BlockHeadersPacket66{1111, BlockHeadersPacket{header}},
+ BlockHeadersPacket{1111, BlockHeadersRequest{header}},
common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
},
{
- GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)},
+ GetBlockBodiesPacket{1111, GetBlockBodiesRequest(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
},
{
- BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})},
+ BlockBodiesPacket{1111, BlockBodiesResponse([]*BlockBody{blockBody})},
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
},
{ // Identical to non-rlp-shortcut version
- BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})},
+ BlockBodiesRLPPacket{1111, BlockBodiesRLPResponse([]rlp.RawValue{blockBodyRlp})},
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"),
},
{
- GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)},
+ GetReceiptsPacket{1111, GetReceiptsRequest(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
},
{
- NodeDataPacket66{1111, NodeDataPacket(byteSlices)},
- common.FromHex("ce820457ca84deadc0de84feedbeef"),
- },
- {
- GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)},
- common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
- },
- {
- ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})},
+ ReceiptsPacket{1111, ReceiptsResponse([][]*types.Receipt{receipts})},
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
},
{
- ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})},
+ ReceiptsRLPPacket{1111, ReceiptsRLPResponse([]rlp.RawValue{receiptsRlp})},
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"),
},
{
- GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)},
+ GetPooledTransactionsPacket{1111, GetPooledTransactionsRequest(hashes)},
common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"),
},
{
- PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)},
+ PooledTransactionsPacket{1111, PooledTransactionsResponse(txs)},
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
},
{
- PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)},
+ PooledTransactionsRLPPacket{1111, PooledTransactionsRLPResponse(txRlps)},
common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"),
},
} {
diff --git ethereum/go-ethereum/eth/protocols/snap/handler.go taikoxyz/taiko-geth/eth/protocols/snap/handler.go
index 1c6d80d354ce23027977070b90d42e4839e535a1..bd7ce9e71543c944c1a8e1ff2dd2dfcaa95d236f 100644
--- ethereum/go-ethereum/eth/protocols/snap/handler.go
+++ taikoxyz/taiko-geth/eth/protocols/snap/handler.go
@@ -24,13 +24,13 @@
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
const (
@@ -284,7 +284,7 @@ if req.Bytes > softResponseLimit {
req.Bytes = softResponseLimit
}
// Retrieve the requested state and bail out if non existent
- tr, err := trie.New(trie.StateTrieID(req.Root), chain.StateCache().TrieDB())
+ tr, err := trie.New(trie.StateTrieID(req.Root), chain.TrieDB())
if err != nil {
return nil, nil
}
@@ -321,7 +321,7 @@ }
it.Release()
// Generate the Merkle proofs for the first and last account
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
if err := tr.Prove(req.Origin[:], proof); err != nil {
log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
return nil, nil
@@ -333,7 +333,7 @@ return nil, nil
}
}
var proofs [][]byte
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
return accounts, proofs
@@ -367,7 +367,7 @@ var origin common.Hash
if len(req.Origin) > 0 {
origin, req.Origin = common.BytesToHash(req.Origin), nil
}
- var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ var limit = common.MaxHash
if len(req.Limit) > 0 {
limit, req.Limit = common.BytesToHash(req.Limit), nil
}
@@ -414,7 +414,7 @@ // in the response, no need for any proofs.
if origin != (common.Hash{}) || (abort && len(storage) > 0) {
// Request started at a non-zero hash or was capped prematurely, add
// the endpoint Merkle proofs
- accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.StateCache().TrieDB())
+ accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.TrieDB())
if err != nil {
return nil, nil
}
@@ -423,11 +423,11 @@ if err != nil || acc == nil {
return nil, nil
}
id := trie.StorageTrieID(req.Root, account, acc.Root)
- stTrie, err := trie.NewStateTrie(id, chain.StateCache().TrieDB())
+ stTrie, err := trie.NewStateTrie(id, chain.TrieDB())
if err != nil {
return nil, nil
}
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
if err := stTrie.Prove(origin[:], proof); err != nil {
log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
return nil, nil
@@ -438,7 +438,7 @@ log.Warn("Failed to prove storage range", "last", last, "err", err)
return nil, nil
}
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
// Proof terminates the reply as proofs are only added if a node
@@ -487,7 +487,7 @@ if req.Bytes > softResponseLimit {
req.Bytes = softResponseLimit
}
// Make sure we have the state associated with the request
- triedb := chain.StateCache().TrieDB()
+ triedb := chain.TrieDB()
accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), triedb)
if err != nil {
diff --git ethereum/go-ethereum/eth/protocols/snap/range.go taikoxyz/taiko-geth/eth/protocols/snap/range.go
index 2627cb954b8fb69442a00058ed2707e66c747f0d..8c98c71d5064207a313053b8ff101b8709e494f5 100644
--- ethereum/go-ethereum/eth/protocols/snap/range.go
+++ taikoxyz/taiko-geth/eth/protocols/snap/range.go
@@ -67,7 +67,7 @@ func (r *hashRange) End() common.Hash {
// If the end overflows (non divisible range), return a shorter interval
next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
if overflow {
- return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ return common.MaxHash
}
return next.SubUint64(next, 1).Bytes32()
}
diff --git ethereum/go-ethereum/eth/protocols/snap/range_test.go taikoxyz/taiko-geth/eth/protocols/snap/range_test.go
index 3461439e54bdf85e2572227da632d7131c551408..ea643f13612f9e3e577bffd4d7185bd13ca36bc8 100644
--- ethereum/go-ethereum/eth/protocols/snap/range_test.go
+++ taikoxyz/taiko-geth/eth/protocols/snap/range_test.go
@@ -45,7 +45,7 @@ ends: []common.Hash{
common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
- common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ common.MaxHash,
},
},
// Split a divisible part of the hash range up into 2 chunks
@@ -58,7 +58,7 @@ common.HexToHash("0x9000000000000000000000000000000000000000000000000000000000000000"),
},
ends: []common.Hash{
common.HexToHash("0x8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
- common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ common.MaxHash,
},
},
// Split the entire hash range into a non divisible 3 chunks
@@ -73,7 +73,7 @@ },
ends: []common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
- common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ common.MaxHash,
},
},
// Split a part of hash range into a non divisible 3 chunks
@@ -88,7 +88,7 @@ },
ends: []common.Hash{
common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555555"),
- common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ common.MaxHash,
},
},
// Split a part of hash range into a non divisible 3 chunks, but with a
@@ -108,7 +108,7 @@ },
ends: []common.Hash{
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5"),
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb"),
- common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ common.MaxHash,
},
},
}
diff --git ethereum/go-ethereum/eth/protocols/snap/sync.go taikoxyz/taiko-geth/eth/protocols/snap/sync.go
index 0f5f2ccdfeb97d18b6e9bb1c7a2f7584f2db5b6e..22638d04735fcde1e772d2914467ed28c6a50bbd 100644
--- ethereum/go-ethereum/eth/protocols/snap/sync.go
+++ taikoxyz/taiko-geth/eth/protocols/snap/sync.go
@@ -37,11 +37,11 @@ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/msgrate"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -738,8 +738,8 @@ OnPut: func(key []byte, value []byte) {
s.accountBytes += common.StorageSize(len(key) + len(value))
},
}
- task.genTrie = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(task.genBatch, owner, path, hash, val, s.scheme)
+ task.genTrie = trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
+ rawdb.WriteTrieNode(task.genBatch, common.Hash{}, path, hash, val, s.scheme)
})
for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks {
@@ -751,9 +751,10 @@ OnPut: func(key []byte, value []byte) {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
- subtask.genTrie = trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ owner := accountHash // local assignment for stacktrie writer closure
+ subtask.genTrie = trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, val, s.scheme)
- }, accountHash)
+ })
}
}
}
@@ -797,7 +798,7 @@ for i := 0; i < accountConcurrency; i++ {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
// Make sure we don't overflow if the step is not a proper divisor
- last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ last = common.MaxHash
}
batch := ethdb.HookedBatch{
Batch: s.db.NewBatch(),
@@ -810,8 +811,8 @@ Next: next,
Last: last,
SubTasks: make(map[common.Hash][]*storageTask),
genBatch: batch,
- genTrie: trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
+ genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
+ rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, val, s.scheme)
}),
})
log.Debug("Created account sync task", "from", next, "last", last)
@@ -1873,7 +1874,7 @@ s.forwardAccountTask(res.task)
return
}
// Some accounts are incomplete, leave as is for the storage and contract
- // task assigners to pick up and fill.
+ // task assigners to pick up and fill
}
// processBytecodeResponse integrates an already validated bytecode response
@@ -2004,14 +2005,15 @@ OnPut: func(key []byte, value []byte) {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
+ owner := account // local assignment for stacktrie writer closure
tasks = append(tasks, &storageTask{
Next: common.Hash{},
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }, account),
+ }),
})
for r.Next() {
batch := ethdb.HookedBatch{
@@ -2025,9 +2027,9 @@ Next: r.Start(),
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ genTrie: trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }, account),
+ }),
})
}
for _, task := range tasks {
@@ -2072,9 +2074,10 @@ // reconstructed later.
slots += len(res.hashes[i])
if i < len(res.hashes)-1 || res.subTask == nil {
- tr := trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }, account)
+ // no need to make local reassignment of account: this closure does not outlive the loop
+ tr := trie.NewStackTrie(func(path []byte, hash common.Hash, val []byte) {
+ rawdb.WriteTrieNode(batch, account, path, hash, val, s.scheme)
+ })
for j := 0; j < len(res.hashes[i]); j++ {
tr.Update(res.hashes[i][j][:], res.slots[i][j])
}
@@ -2394,17 +2397,11 @@ keys := make([][]byte, len(hashes))
for i, key := range hashes {
keys[i] = common.CopyBytes(key[:])
}
- nodes := make(light.NodeList, len(proof))
+ nodes := make(trienode.ProofList, len(proof))
for i, node := range proof {
nodes[i] = node
}
- proofdb := nodes.NodeSet()
-
- var end []byte
- if len(keys) > 0 {
- end = keys[len(keys)-1]
- }
- cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)
+ cont, err := trie.VerifyRangeProof(root, req.origin[:], keys, accounts, nodes.Set())
if err != nil {
logger.Warn("Account range failed proof", "err", err)
// Signal this request as failed, and ready for rescheduling
@@ -2621,7 +2618,7 @@ // Response is valid, but check if peer is signalling that it does not have
// the requested data. For storage range queries that means the state being
// retrieved was either already pruned remotely, or the peer is not yet
// synced to our head.
- if len(hashes) == 0 {
+ if len(hashes) == 0 && len(proof) == 0 {
logger.Debug("Peer rejected storage request")
s.statelessPeers[peer.ID()] = struct{}{}
s.lock.Unlock()
@@ -2633,13 +2630,20 @@
// Reconstruct the partial tries from the response and verify them
var cont bool
+ // If a proof was attached while the response is empty, it indicates that the
+ // requested range specified with 'origin' is empty. Construct an empty state
+ // response locally to finalize the range.
+ if len(hashes) == 0 && len(proof) > 0 {
+ hashes = append(hashes, []common.Hash{})
+ slots = append(slots, [][]byte{})
+ }
for i := 0; i < len(hashes); i++ {
// Convert the keys and proofs into an internal format
keys := make([][]byte, len(hashes[i]))
for j, key := range hashes[i] {
keys[j] = common.CopyBytes(key[:])
}
- nodes := make(light.NodeList, 0, len(proof))
+ nodes := make(trienode.ProofList, 0, len(proof))
if i == len(hashes)-1 {
for _, node := range proof {
nodes = append(nodes, node)
@@ -2649,7 +2653,7 @@ var err error
if len(nodes) == 0 {
// No proof has been attached, the response must cover the entire key
// space and hash to the origin root.
- _, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
+ _, err = trie.VerifyRangeProof(req.roots[i], nil, keys, slots[i], nil)
if err != nil {
s.scheduleRevertStorageRequest(req) // reschedule request
logger.Warn("Storage slots failed proof", "err", err)
@@ -2658,13 +2662,9 @@ }
} else {
// A proof was attached, the response is only partial, check that the
// returned data is indeed part of the storage trie
- proofdb := nodes.NodeSet()
+ proofdb := nodes.Set()
- var end []byte
- if len(keys) > 0 {
- end = keys[len(keys)-1]
- }
- cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
+ cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], keys, slots[i], proofdb)
if err != nil {
s.scheduleRevertStorageRequest(req) // reschedule request
logger.Warn("Storage range failed proof", "err", err)
diff --git ethereum/go-ethereum/eth/protocols/snap/sync_test.go taikoxyz/taiko-geth/eth/protocols/snap/sync_test.go
index 0aa6fd8730dd8090e2e0e3eece709a9e544e0659..5d4099a8140ef229243fdf82951bae77052446a0 100644
--- ethereum/go-ethereum/eth/protocols/snap/sync_test.go
+++ taikoxyz/taiko-geth/eth/protocols/snap/sync_test.go
@@ -22,6 +22,7 @@ "crypto/rand"
"encoding/binary"
"fmt"
"math/big"
+ mrand "math/rand"
"sync"
"testing"
"time"
@@ -31,10 +32,11 @@ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/testutil"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
"golang.org/x/exp/slices"
@@ -253,7 +255,7 @@
func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
var size uint64
if limit == (common.Hash{}) {
- limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ limit = common.MaxHash
}
for _, entry := range t.accountValues {
if size > cap {
@@ -272,7 +274,7 @@ }
// Unless we send the entire trie, we need to supply proofs
// Actually, we need to supply proofs either way! This seems to be an implementation
// quirk in go-ethereum
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
if err := t.accountTrie.Prove(origin[:], proof); err != nil {
t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
}
@@ -282,7 +284,7 @@ if err := t.accountTrie.Prove(lastK, proof); err != nil {
t.logger.Error("Could not prove last item", "error", err)
}
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
return keys, vals, proofs
@@ -318,7 +320,7 @@ var originHash common.Hash
if len(origin) > 0 {
originHash = common.BytesToHash(origin)
}
- var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ var limitHash = common.MaxHash
if len(limit) > 0 {
limitHash = common.BytesToHash(limit)
}
@@ -352,7 +354,7 @@ // in the response, no need for any proofs.
if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
// If we're aborting, we need to prove the first and last item
// This terminates the response (and thus the loop)
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
stTrie := t.storageTries[account]
// Here's a potential gotcha: when constructing the proof, we cannot
@@ -367,7 +369,7 @@ if err := stTrie.Prove(lastK, proof); err != nil {
t.logger.Error("Could not prove last item", "error", err)
}
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
break
@@ -410,7 +412,7 @@
if exit {
// If we're aborting, we need to prove the first and last item
// This terminates the response (and thus the loop)
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
stTrie := t.storageTries[account]
// Here's a potential gotcha: when constructing the proof, we cannot
@@ -426,7 +428,7 @@ if err := stTrie.Prove(lastK, proof); err != nil {
t.logger.Error("Could not prove last item", "error", err)
}
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
break
@@ -561,6 +563,11 @@ // the remote side does not do any follow-up requests
func TestSyncBloatedProof(t *testing.T) {
t.Parallel()
+ testSyncBloatedProof(t, rawdb.HashScheme)
+ testSyncBloatedProof(t, rawdb.PathScheme)
+}
+
+func testSyncBloatedProof(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -570,7 +577,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
source := newTestPeer("source", t, term)
source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
@@ -593,8 +600,8 @@ keys = append(keys, common.BytesToHash(entry.k))
vals = append(vals, entry.v)
}
// The proofs
- proof := light.NewNodeSet()
+ proof := trienode.NewProofSet()
if err := t.accountTrie.Prove(origin[:], proof); err != nil {
t.logger.Error("Could not prove origin", "origin", origin, "error", err)
}
// The bloat: add proof of every single element
@@ -608,7 +616,7 @@ if len(keys) > 2 {
keys = append(keys[:1], keys[2:]...)
vals = append(vals[:1], vals[2:]...)
}
- for _, blob := range proof.NodeList() {
+ for _, blob := range proof.List() {
proofs = append(proofs, blob)
}
if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
@@ -638,6 +646,11 @@ // TestSync tests a basic sync with one peer
func TestSync(t *testing.T) {
t.Parallel()
+ testSync(t, rawdb.HashScheme)
+ testSync(t, rawdb.PathScheme)
+}
+
+func testSync(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -647,7 +660,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -659,7 +672,7 @@ syncer := setupSyncer(nodeScheme, mkSource("source"))
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
@@ -667,6 +680,11 @@ // panic within the prover
func TestSyncTinyTriePanic(t *testing.T) {
t.Parallel()
+ testSyncTinyTriePanic(t, rawdb.HashScheme)
+ testSyncTinyTriePanic(t, rawdb.PathScheme)
+}
+
+func testSyncTinyTriePanic(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -676,7 +694,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1, scheme)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -690,13 +708,18 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSync tests a basic sync with multiple peers
func TestMultiSync(t *testing.T) {
t.Parallel()
+ testMultiSync(t, rawdb.HashScheme)
+ testMultiSync(t, rawdb.PathScheme)
+}
+
+func testMultiSync(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -706,7 +729,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -720,13 +743,18 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorage tests basic sync using accounts + storage + code
func TestSyncWithStorage(t *testing.T) {
t.Parallel()
+ testSyncWithStorage(t, rawdb.HashScheme)
+ testSyncWithStorage(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorage(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -736,7 +764,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -746,19 +774,24 @@ source.setStorageTries(storageTries)
source.storageValues = storageElems
return source
}
- syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
+ syncer := setupSyncer(scheme, mkSource("sourceA"))
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all
func TestMultiSyncManyUseless(t *testing.T) {
t.Parallel()
+ testMultiSyncManyUseless(t, rawdb.HashScheme)
+ testMultiSyncManyUseless(t, rawdb.PathScheme)
+}
+
+func testMultiSyncManyUseless(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -768,7 +801,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -790,7 +823,7 @@ return source
}
syncer := setupSyncer(
- nodeScheme,
+ scheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@@ -801,11 +834,18 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
+ t.Parallel()
+
+ testMultiSyncManyUselessWithLowTimeout(t, rawdb.HashScheme)
+ testMultiSyncManyUselessWithLowTimeout(t, rawdb.PathScheme)
+}
+
+func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -815,7 +855,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -837,7 +877,7 @@ return source
}
syncer := setupSyncer(
- nodeScheme,
+ scheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@@ -853,11 +893,18 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all
func TestMultiSyncManyUnresponsive(t *testing.T) {
+ t.Parallel()
+
+ testMultiSyncManyUnresponsive(t, rawdb.HashScheme)
+ testMultiSyncManyUnresponsive(t, rawdb.PathScheme)
+}
+
+func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -867,7 +914,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -889,7 +936,7 @@ return source
}
syncer := setupSyncer(
- nodeScheme,
+ scheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@@ -903,7 +950,7 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
func checkStall(t *testing.T, term func()) chan struct{} {
@@ -925,6 +972,11 @@ // account trie has a few boundary elements.
func TestSyncBoundaryAccountTrie(t *testing.T) {
t.Parallel()
+ testSyncBoundaryAccountTrie(t, rawdb.HashScheme)
+ testSyncBoundaryAccountTrie(t, rawdb.PathScheme)
+}
+
+func testSyncBoundaryAccountTrie(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -934,7 +986,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
+ nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(scheme, 3000)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -952,7 +1004,7 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
@@ -960,6 +1012,11 @@ // consistently returning very small results
func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
t.Parallel()
+ testSyncNoStorageAndOneCappedPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneCappedPeer(t, rawdb.PathScheme)
+}
+
+func testSyncNoStorageAndOneCappedPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -969,7 +1026,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -994,7 +1051,7 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
@@ -1002,6 +1059,11 @@ // code requests properly.
func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
t.Parallel()
+ testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.PathScheme)
+}
+
+func testSyncNoStorageAndOneCodeCorruptPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1011,7 +1073,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1034,12 +1096,17 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
t.Parallel()
+ testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.PathScheme)
+}
+
+func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1049,7 +1116,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1072,7 +1139,7 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
@@ -1080,6 +1147,11 @@ // one by one
func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
t.Parallel()
+ testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.PathScheme)
+}
+
+func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1089,7 +1161,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1123,7 +1195,7 @@ // the number can be flaky, so don't limit it so strictly.
if threshold := 100; counter > threshold {
t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
}
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
@@ -1131,6 +1203,11 @@ // storage trie has a few boundary elements.
func TestSyncBoundaryStorageTrie(t *testing.T) {
t.Parallel()
+ testSyncBoundaryStorageTrie(t, rawdb.HashScheme)
+ testSyncBoundaryStorageTrie(t, rawdb.PathScheme)
+}
+
+func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1140,7 +1217,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -1151,7 +1228,7 @@ source.storageValues = storageElems
return source
}
syncer := setupSyncer(
- nodeScheme,
+ scheme,
mkSource("peer-a"),
mkSource("peer-b"),
)
@@ -1160,7 +1237,7 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
@@ -1168,6 +1245,11 @@ // consistently returning very small results
func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
t.Parallel()
+ testSyncWithStorageAndOneCappedPeer(t, rawdb.HashScheme)
+ testSyncWithStorageAndOneCappedPeer(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1177,7 +1259,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false, false)
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -1193,7 +1275,7 @@ return source
}
syncer := setupSyncer(
- nodeScheme,
+ scheme,
mkSource("nice-a", false),
mkSource("slow", true),
)
@@ -1202,7 +1284,7 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
@@ -1210,6 +1292,11 @@ // sometimes sending bad proofs
func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
t.Parallel()
+ testSyncWithStorageAndCorruptPeer(t, rawdb.HashScheme)
+ testSyncWithStorageAndCorruptPeer(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1219,7 +1306,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1232,7 +1319,7 @@ return source
}
syncer := setupSyncer(
- nodeScheme,
+ scheme,
mkSource("nice-a", defaultStorageRequestHandler),
mkSource("nice-b", defaultStorageRequestHandler),
mkSource("nice-c", defaultStorageRequestHandler),
@@ -1243,12 +1330,17 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
t.Parallel()
+ testSyncWithStorageAndNonProvingPeer(t, rawdb.HashScheme)
+ testSyncWithStorageAndNonProvingPeer(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1258,7 +1350,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1270,7 +1362,7 @@ source.storageRequestHandler = handler
return source
}
syncer := setupSyncer(
- nodeScheme,
+ scheme,
mkSource("nice-a", defaultStorageRequestHandler),
mkSource("nice-b", defaultStorageRequestHandler),
mkSource("nice-c", defaultStorageRequestHandler),
@@ -1281,7 +1373,7 @@ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorage tests basic sync using accounts + storage + code, against
@@ -1290,6 +1382,12 @@ // an error, where the recipient erroneously clipped the boundary nodes, but
// did not mark the account for healing.
func TestSyncWithStorageMisbehavingProve(t *testing.T) {
t.Parallel()
+
+ testSyncWithStorageMisbehavingProve(t, rawdb.HashScheme)
+ testSyncWithStorageMisbehavingProve(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1299,7 +1397,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(scheme, 10, 30, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -1314,7 +1412,46 @@ syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestSyncWithUnevenStorage tests sync where the storage trie is not even
+// and with a few empty ranges.
+func TestSyncWithUnevenStorage(t *testing.T) {
+ t.Parallel()
+
+ testSyncWithUnevenStorage(t, rawdb.HashScheme)
+ testSyncWithUnevenStorage(t, rawdb.PathScheme)
+}
+
+func testSyncWithUnevenStorage(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ accountTrie, accounts, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 256, false, false, true)
+
+ mkSource := func(name string) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = accountTrie.Copy()
+ source.accountValues = accounts
+ source.setStorageTries(storageTries)
+ source.storageValues = storageElems
+ source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
+ return defaultStorageRequestHandler(t, reqId, root, accounts, origin, limit, 128) // retrieve storage in large mode
+ }
+ return source
+ }
+ syncer := setupSyncer(scheme, mkSource("source"))
+ if err := syncer.Sync(accountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ verifyTrie(scheme, syncer.db, accountTrie.Hash(), t)
}
type kv struct {
@@ -1364,9 +1501,9 @@ return nil
}
// makeAccountTrieNoStorage spits out a trie, along with the leafs
-func makeAccountTrieNoStorage(n int) (string, *trie.Trie, []*kv) {
+func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) {
var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
entries []*kv
)
@@ -1396,12 +1533,12 @@
// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hash.
-func makeBoundaryAccountTrie(n int) (string, *trie.Trie, []*kv) {
+func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
var (
entries []*kv
boundaries []common.Hash
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
)
// Initialize boundaries
@@ -1415,7 +1552,7 @@ )
for i := 0; i < accountConcurrency; i++ {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
- last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ last = common.MaxHash
}
boundaries = append(boundaries, last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
@@ -1457,9 +1594,9 @@ }
// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each accounts
// has a unique storage set.
-func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
+func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
entries []*kv
storageRoots = make(map[common.Hash]common.Hash)
@@ -1512,9 +1649,9 @@ return db.Scheme(), accTrie, entries, storageTries, storageEntries
}
// makeAccountTrieWithStorage spits out a trie, along with the leafs
-func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
+func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
entries []*kv
storageRoots = make(map[common.Hash]common.Hash)
@@ -1537,6 +1674,8 @@ stEntries []*kv
)
if boundary {
stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
+ } else if uneven {
+ stRoot, stNodes, stEntries = makeUnevenStorageTrie(common.BytesToHash(key), slots, db)
} else {
stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
}
@@ -1579,7 +1718,7 @@ panic(err)
}
storageTries[common.BytesToHash(key)] = trie
}
- return db.Scheme(), accTrie, entries, storageTries, storageEntries
+ return accTrie, entries, storageTries, storageEntries
}
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
@@ -1625,7 +1764,7 @@ )
for i := 0; i < accountConcurrency; i++ {
last := common.BigToHash(new(big.Int).Add(next.Big(), step))
if i == accountConcurrency-1 {
- last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ last = common.MaxHash
}
boundaries = append(boundaries, last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
@@ -1656,9 +1795,41 @@ root, nodes, _ := trie.Commit(false)
return root, nodes, entries
}
-func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
+// makeUnevenStorageTrie constructs a storage trie with states distributed
+// unevenly across different key ranges.
+func makeUnevenStorageTrie(owner common.Hash, slots int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
+ var (
+ entries []*kv
+ tr, _ = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
+ chosen = make(map[byte]struct{})
+ )
+ for i := 0; i < 3; i++ {
+ var n int
+ for {
+ n = mrand.Intn(15) // the last range is set empty deliberately
+ if _, ok := chosen[byte(n)]; ok {
+ continue
+ }
+ chosen[byte(n)] = struct{}{}
+ break
+ }
+ for j := 0; j < slots/3; j++ {
+ key := append([]byte{byte(n)}, testutil.RandBytes(31)...)
+ val, _ := rlp.EncodeToBytes(testutil.RandBytes(32))
+
+ elem := &kv{key, val}
+ tr.MustUpdate(elem.k, elem.v)
+ entries = append(entries, elem)
+ }
+ }
+ slices.SortFunc(entries, (*kv).cmp)
+ root, nodes, _ := tr.Commit(false)
+ return root, nodes, entries
+}
+
+func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper()
- triedb := trie.NewDatabase(rawdb.NewDatabase(db))
+ triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
accTrie, err := trie.New(trie.StateTrieID(root), triedb)
if err != nil {
t.Fatal(err)
@@ -1700,6 +1871,13 @@
// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
// state healing
func TestSyncAccountPerformance(t *testing.T) {
+ t.Parallel()
+
+ testSyncAccountPerformance(t, rawdb.HashScheme)
+ testSyncAccountPerformance(t, rawdb.PathScheme)
+}
+
+func testSyncAccountPerformance(t *testing.T, scheme string) {
// Set the account concurrency to 1. This _should_ result in the
// range root to become correct, and there should be no healing needed
defer func(old int) { accountConcurrency = old }(accountConcurrency)
@@ -1714,7 +1892,7 @@ close(cancel)
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -1727,7 +1905,7 @@ syncer := setupSyncer(nodeScheme, src)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
// The trie root will always be requested, since it is added when the snap
// sync cycle starts. When popping the queue, we do not look it up again.
// Doing so would bring this number down to zero in this artificial testcase,
@@ -1787,3 +1965,10 @@ t.Errorf("test %d: have %d want %d", i, have, want)
}
}
}
+
+func newDbConfig(scheme string) *trie.Config {
+ if scheme == rawdb.HashScheme {
+ return &trie.Config{}
+ }
+ return &trie.Config{PathDB: pathdb.Defaults}
+}
diff --git ethereum/go-ethereum/eth/tracers/internal/tracetest/calltrace_test.go taikoxyz/taiko-geth/eth/tracers/internal/tracetest/calltrace_test.go
index 26c4455e301f8c10bf65c138e0db83801515dda0..6df49a90c1d00cb275101652fbcc4fcd29b3fb0c 100644
--- ethereum/go-ethereum/eth/tracers/internal/tracetest/calltrace_test.go
+++ taikoxyz/taiko-geth/eth/tracers/internal/tracetest/calltrace_test.go
@@ -137,8 +137,10 @@ Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
BaseFee: test.Genesis.BaseFee,
}
- _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
)
+ triedb.Close()
+
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
@@ -237,7 +239,8 @@ Time: uint64(test.Context.Time),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ defer triedb.Close()
b.ReportAllocs()
b.ResetTimer()
@@ -363,7 +366,7 @@ want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52647880"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600160ff60016000f560ff6000a0"},"0x91ff9a805d36f54e3e272e230f3e3f5c1b330804":{"balance":"0x0"}}`,
},
} {
t.Run(tc.name, func(t *testing.T) {
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(),
+ triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(),
core.GenesisAlloc{
to: core.GenesisAccount{
Code: tc.code,
@@ -371,7 +374,9 @@ },
origin: core.GenesisAccount{
Balance: big.NewInt(500000000000000),
},
- }, false)
+ }, false, rawdb.HashScheme)
+ defer triedb.Close()
+
evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Tracer: tc.tracer})
msg := &core.Message{
To: &to,
diff --git ethereum/go-ethereum/eth/tracers/internal/tracetest/flat_calltrace_test.go taikoxyz/taiko-geth/eth/tracers/internal/tracetest/flat_calltrace_test.go
index 85e95401a21557a97cffab48f772d7e79ca3db0a..423167b13ccd58cfb3d7c04be73c40430ba8f273 100644
--- ethereum/go-ethereum/eth/tracers/internal/tracetest/flat_calltrace_test.go
+++ taikoxyz/taiko-geth/eth/tracers/internal/tracetest/flat_calltrace_test.go
@@ -100,7 +100,8 @@ Time: uint64(test.Context.Time),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ defer triedb.Close()
// Create the tracer, the EVM environment and run it
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
diff --git ethereum/go-ethereum/eth/tracers/internal/tracetest/prestate_test.go taikoxyz/taiko-geth/eth/tracers/internal/tracetest/prestate_test.go
index 991da10b3ab108077b70a99426270c4f844bd796..b4fa5b627269b778a88253fb08a5425fe5e6a12d 100644
--- ethereum/go-ethereum/eth/tracers/internal/tracetest/prestate_test.go
+++ taikoxyz/taiko-geth/eth/tracers/internal/tracetest/prestate_test.go
@@ -108,8 +108,10 @@ Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
BaseFee: test.Genesis.BaseFee,
}
- _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
)
+ defer triedb.Close()
+
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
diff --git ethereum/go-ethereum/eth/tracers/js/goja.go taikoxyz/taiko-geth/eth/tracers/js/goja.go
index f3d63df8ed5d87f1dc3e5823c68ea532e155aee3..d22d140988fe16cf2132e6b0fed79154751fb92e 100644
--- ethereum/go-ethereum/eth/tracers/js/goja.go
+++ taikoxyz/taiko-geth/eth/tracers/js/goja.go
@@ -236,7 +236,12 @@ t.ctx["from"] = t.vm.ToValue(from.Bytes())
t.ctx["to"] = t.vm.ToValue(to.Bytes())
t.ctx["input"] = t.vm.ToValue(input)
t.ctx["gas"] = t.vm.ToValue(t.gasLimit)
- t.ctx["gasPrice"] = t.vm.ToValue(env.TxContext.GasPrice)
+ gasPriceBig, err := t.toBig(t.vm, env.TxContext.GasPrice.String())
+ if err != nil {
+ t.err = err
+ return
+ }
+ t.ctx["gasPrice"] = gasPriceBig
valueBig, err := t.toBig(t.vm, value.String())
if err != nil {
t.err = err
diff --git ethereum/go-ethereum/ethclient/gethclient/gethclient.go taikoxyz/taiko-geth/ethclient/gethclient/gethclient.go
index c029611678f0eda620e81c27f10680fd2fae2392..e2c0ef3ed02ea7b1a575f85959203d63b173b3ed 100644
--- ethereum/go-ethereum/ethclient/gethclient/gethclient.go
+++ taikoxyz/taiko-geth/ethclient/gethclient/gethclient.go
@@ -225,7 +225,7 @@ "from": msg.From,
"to": msg.To,
}
if len(msg.Data) > 0 {
- arg["data"] = hexutil.Bytes(msg.Data)
+ arg["input"] = hexutil.Bytes(msg.Data)
}
if msg.Value != nil {
arg["value"] = (*hexutil.Big)(msg.Value)
diff --git ethereum/go-ethereum/ethclient/gethclient/gethclient_test.go taikoxyz/taiko-geth/ethclient/gethclient/gethclient_test.go
index 5a0f4d2534e32d7a2e66a52d53f80f2fc56ed127..de45b106957afd3f65d81d7272f4ef8aba012deb 100644
--- ethereum/go-ethereum/ethclient/gethclient/gethclient_test.go
+++ taikoxyz/taiko-geth/ethclient/gethclient/gethclient_test.go
@@ -39,11 +39,12 @@ "github.com/ethereum/go-ethereum/rpc"
)
var (
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
- testSlot = common.HexToHash("0xdeadbeef")
- testValue = crypto.Keccak256Hash(testSlot[:])
- testBalance = big.NewInt(2e15)
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
+ testContract = common.HexToAddress("0xbeef")
+ testSlot = common.HexToHash("0xdeadbeef")
+ testValue = crypto.Keccak256Hash(testSlot[:])
+ testBalance = big.NewInt(2e15)
)
func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
@@ -78,8 +79,9 @@ }
func generateTestChain() (*core.Genesis, []*types.Block) {
genesis := &core.Genesis{
- Config: params.AllEthashProtocolChanges,
- Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance, Storage: map[common.Hash]common.Hash{testSlot: testValue}}},
+ Config: params.AllEthashProtocolChanges,
+ Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance, Storage: map[common.Hash]common.Hash{testSlot: testValue}},
+ testContract: {Nonce: 1, Code: []byte{0x13, 0x37}}},
ExtraData: []byte("test genesis"),
Timestamp: 9000,
}
@@ -103,8 +105,11 @@ name string
test func(t *testing.T)
}{
{
- "TestGetProof",
- func(t *testing.T) { testGetProof(t, client) },
+ "TestGetProof1",
+ func(t *testing.T) { testGetProof(t, client, testAddr) },
+ }, {
+ "TestGetProof2",
+ func(t *testing.T) { testGetProof(t, client, testContract) },
}, {
"TestGetProofCanonicalizeKeys",
func(t *testing.T) { testGetProofCanonicalizeKeys(t, client) },
@@ -201,38 +206,41 @@ t.Fatalf("unexpected storage key: %v", (*al)[0].StorageKeys[0])
}
}
-func testGetProof(t *testing.T, client *rpc.Client) {
+func testGetProof(t *testing.T, client *rpc.Client, addr common.Address) {
ec := New(client)
ethcl := ethclient.NewClient(client)
- result, err := ec.GetProof(context.Background(), testAddr, []string{testSlot.String()}, nil)
+ result, err := ec.GetProof(context.Background(), addr, []string{testSlot.String()}, nil)
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(result.Address[:], testAddr[:]) {
- t.Fatalf("unexpected address, want: %v got: %v", testAddr, result.Address)
+ if result.Address != addr {
+ t.Fatalf("unexpected address, have: %v want: %v", result.Address, addr)
}
// test nonce
- nonce, _ := ethcl.NonceAt(context.Background(), result.Address, nil)
- if result.Nonce != nonce {
+ if nonce, _ := ethcl.NonceAt(context.Background(), addr, nil); result.Nonce != nonce {
t.Fatalf("invalid nonce, want: %v got: %v", nonce, result.Nonce)
}
// test balance
- balance, _ := ethcl.BalanceAt(context.Background(), result.Address, nil)
- if result.Balance.Cmp(balance) != 0 {
+ if balance, _ := ethcl.BalanceAt(context.Background(), addr, nil); result.Balance.Cmp(balance) != 0 {
t.Fatalf("invalid balance, want: %v got: %v", balance, result.Balance)
}
-
// test storage
if len(result.StorageProof) != 1 {
t.Fatalf("invalid storage proof, want 1 proof, got %v proof(s)", len(result.StorageProof))
}
- proof := result.StorageProof[0]
- slotValue, _ := ethcl.StorageAt(context.Background(), testAddr, testSlot, nil)
- if !bytes.Equal(slotValue, proof.Value.Bytes()) {
- t.Fatalf("invalid storage proof value, want: %v, got: %v", slotValue, proof.Value.Bytes())
+ for _, proof := range result.StorageProof {
+ if proof.Key != testSlot.String() {
+ t.Fatalf("invalid storage proof key, want: %q, got: %q", testSlot.String(), proof.Key)
+ }
+ slotValue, _ := ethcl.StorageAt(context.Background(), addr, common.HexToHash(proof.Key), nil)
+ if have, want := common.BigToHash(proof.Value), common.BytesToHash(slotValue); have != want {
+ t.Fatalf("addr %x, invalid storage proof value: have: %v, want: %v", addr, have, want)
+ }
}
- if proof.Key != testSlot.String() {
- t.Fatalf("invalid storage proof key, want: %q, got: %q", testSlot.String(), proof.Key)
+ // test code
+ code, _ := ethcl.CodeAt(context.Background(), addr, nil)
+ if have, want := result.CodeHash, crypto.Keccak256Hash(code); have != want {
+ t.Fatalf("codehash wrong, have %v want %v ", have, want)
}
}
diff --git ethereum/go-ethereum/ethdb/leveldb/leveldb.go taikoxyz/taiko-geth/ethdb/leveldb/leveldb.go
index c0e0eb250a75811dabb804fd5800ce27a7812810..e58efbddbe80eb271c706db78f52672db4411cf8 100644
--- ethereum/go-ethereum/ethdb/leveldb/leveldb.go
+++ taikoxyz/taiko-geth/ethdb/leveldb/leveldb.go
@@ -22,6 +22,7 @@ package leveldb
import (
"fmt"
+ "strings"
"sync"
"time"
@@ -245,6 +246,11 @@ }
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
+ if property == "" {
+ property = "leveldb.stats"
+ } else if !strings.HasPrefix(property, "leveldb.") {
+ property = "leveldb." + property
+ }
return db.db.GetProperty(property)
}
diff --git ethereum/go-ethereum/ethdb/pebble/pebble.go taikoxyz/taiko-geth/ethdb/pebble/pebble.go
index a06f59bcfae4e555a634a2cbfe1d6ed1e4921c7d..691aa732998642631bd646e4ef2070310a18c48e 100644
--- ethereum/go-ethereum/ethdb/pebble/pebble.go
+++ taikoxyz/taiko-geth/ethdb/pebble/pebble.go
@@ -14,8 +14,6 @@ //
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-//go:build (arm64 || amd64) && !openbsd
-
// Package pebble implements the key-value database layer based on pebble.
package pebble
@@ -27,6 +25,7 @@ "sync"
"sync/atomic"
"time"
+ "github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble"
"github.com/cockroachdb/pebble/bloom"
"github.com/ethereum/go-ethereum/common"
@@ -70,6 +69,8 @@ nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level
seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt
manualMemAllocGauge metrics.Gauge // Gauge for tracking amount of non-managed memory currently allocated
+ levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in levels
+
quitLock sync.RWMutex // Mutex protecting the quit channel and the closed flag
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
closed bool // keep track of whether we're Closed
@@ -84,6 +85,8 @@ nonLevel0Comp atomic.Uint32 // Total number of non level-zero compactions
writeDelayStartTime time.Time // The start time of the latest write stall
writeDelayCount atomic.Int64 // Total number of write stall counts
writeDelayTime atomic.Int64 // Total time spent in write stalls
+
+ writeOptions *pebble.WriteOptions
}
func (d *Database) onCompactionBegin(info pebble.CompactionInfo) {
@@ -116,9 +119,21 @@ func (d *Database) onWriteStallEnd() {
d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime)))
}
+// panicLogger is just a noop logger to disable Pebble's internal logger.
+//
+// TODO(karalabe): Remove when Pebble sets this as the default.
+type panicLogger struct{}
+
+func (l panicLogger) Infof(format string, args ...interface{}) {
+}
+
+func (l panicLogger) Fatalf(format string, args ...interface{}) {
+ panic(errors.Errorf("fatal: "+format, args...))
+}
+
// New returns a wrapped pebble DB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
-func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) {
+func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) {
// Ensure we have some minimal caching and file guarantees
if cache < minCache {
cache = minCache
@@ -131,8 +146,15 @@ logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)
// The max memtable size is limited by the uint32 offsets stored in
// internal/arenaskl.node, DeferredBatchOp, and flushableBatchEntry.
- // Taken from https://github.com/cockroachdb/pebble/blob/master/open.go#L38
- maxMemTableSize := 4<<30 - 1 // Capped by 4 GB
+ //
+ // - MaxUint32 on 64-bit platforms;
+ // - MaxInt on 32-bit platforms.
+ //
+ // It is used when slices are limited to Uint32 on 64-bit platforms (the
+ // length limit for slices is naturally MaxInt on 32-bit platforms).
+ //
+ // Taken from https://github.com/cockroachdb/pebble/blob/master/internal/constants/constants.go
+ maxMemTableSize := (1<<31)<<(^uint(0)>>63) - 1
// Two memory tables is configured which is identical to leveldb,
// including a frozen memory table and another live one.
@@ -142,9 +164,10 @@ if memTableSize > maxMemTableSize {
memTableSize = maxMemTableSize
}
db := &Database{
- fn: file,
- log: logger,
- quitChan: make(chan chan error),
+ fn: file,
+ log: logger,
+ quitChan: make(chan chan error),
+ writeOptions: &pebble.WriteOptions{Sync: !ephemeral},
}
opt := &pebble.Options{
// Pebble has a single combined cache area and the write
@@ -155,7 +178,7 @@ MaxOpenFiles: handles,
// The size of memory table(as well as the write buffer).
// Note, there may have more than two memory tables in the system.
- MemTableSize: memTableSize,
+ MemTableSize: uint64(memTableSize),
// MemTableStopWritesThreshold places a hard limit on the size
// of the existent MemTables(including the frozen one).
@@ -186,6 +209,7 @@ CompactionEnd: db.onCompactionEnd,
WriteStallBegin: db.onWriteStallBegin,
WriteStallEnd: db.onWriteStallEnd,
},
+ Logger: panicLogger{}, // TODO(karalabe): Delete when this is upstreamed in Pebble
}
// Disable seek compaction explicitly. Check https://github.com/ethereum/go-ethereum/pull/20130
// for more details.
@@ -213,7 +237,7 @@ db.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)
db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil)
// Start up the metrics gathering and return
- go db.meter(metricsGatheringInterval)
+ go db.meter(metricsGatheringInterval, namespace)
return db, nil
}
@@ -279,7 +303,7 @@ defer d.quitLock.RUnlock()
if d.closed {
return pebble.ErrClosed
}
- return d.db.Set(key, value, pebble.Sync)
+ return d.db.Set(key, value, d.writeOptions)
}
// Delete removes the key from the key-value store.
@@ -302,12 +326,9 @@ }
}
// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
-// It's not supported by pebble, but pebble has better memory allocation strategy
-// which turns out a lot faster than leveldb. It's performant enough to construct
-// batch object without any pre-allocated space.
-func (d *Database) NewBatchWithSize(_ int) ethdb.Batch {
+func (d *Database) NewBatchWithSize(size int) ethdb.Batch {
return &batch{
- b: d.db.NewBatch(),
+ b: d.db.NewBatchWithSize(size),
db: d,
}
}
@@ -376,9 +397,12 @@ }
return limit
}
-// Stat returns a particular internal stat of the database.
+// Stat returns the internal metrics of Pebble in a text format. It's a developer
+// method to read everything there is to read independent of Pebble version.
+//
+// The property is unused in Pebble as there's only one thing to retrieve.
func (d *Database) Stat(property string) (string, error) {
- return "", nil
+ return d.db.Metrics().String(), nil
}
// Compact flattens the underlying data store for the given key range. In essence,
@@ -410,7 +434,7 @@ }
// meter periodically retrieves internal pebble counters and reports them to
// the metrics subsystem.
-func (d *Database) meter(refresh time.Duration) {
+func (d *Database) meter(refresh time.Duration, namespace string) {
var errc chan error
timer := time.NewTimer(refresh)
defer timer.Stop()
@@ -433,7 +457,7 @@ compWrite int64
compRead int64
nWrite int64
- metrics = d.db.Metrics()
+ stats = d.db.Metrics()
compTime = d.compTime.Load()
writeDelayCount = d.writeDelayCount.Load()
writeDelayTime = d.writeDelayTime.Load()
@@ -444,14 +468,14 @@ writeDelayTimes[i%2] = writeDelayTime
writeDelayCounts[i%2] = writeDelayCount
compTimes[i%2] = compTime
- for _, levelMetrics := range metrics.Levels {
+ for _, levelMetrics := range stats.Levels {
nWrite += int64(levelMetrics.BytesCompacted)
nWrite += int64(levelMetrics.BytesFlushed)
compWrite += int64(levelMetrics.BytesCompacted)
compRead += int64(levelMetrics.BytesRead)
}
- nWrite += int64(metrics.WAL.BytesWritten)
+ nWrite += int64(stats.WAL.BytesWritten)
compWrites[i%2] = compWrite
compReads[i%2] = compRead
@@ -473,7 +497,7 @@ if d.compWriteMeter != nil {
d.compWriteMeter.Mark(compWrites[i%2] - compWrites[(i-1)%2])
}
if d.diskSizeGauge != nil {
- d.diskSizeGauge.Update(int64(metrics.DiskSpaceUsage()))
+ d.diskSizeGauge.Update(int64(stats.DiskSpaceUsage()))
}
if d.diskReadMeter != nil {
d.diskReadMeter.Mark(0) // pebble doesn't track non-compaction reads
@@ -482,12 +506,20 @@ if d.diskWriteMeter != nil {
d.diskWriteMeter.Mark(nWrites[i%2] - nWrites[(i-1)%2])
}
// See https://github.com/cockroachdb/pebble/pull/1628#pullrequestreview-1026664054
- manuallyAllocated := metrics.BlockCache.Size + int64(metrics.MemTable.Size) + int64(metrics.MemTable.ZombieSize)
+ manuallyAllocated := stats.BlockCache.Size + int64(stats.MemTable.Size) + int64(stats.MemTable.ZombieSize)
d.manualMemAllocGauge.Update(manuallyAllocated)
- d.memCompGauge.Update(metrics.Flush.Count)
+ d.memCompGauge.Update(stats.Flush.Count)
d.nonlevel0CompGauge.Update(nonLevel0CompCount)
d.level0CompGauge.Update(level0CompCount)
- d.seekCompGauge.Update(metrics.Compact.ReadCount)
+ d.seekCompGauge.Update(stats.Compact.ReadCount)
+
+ for i, level := range stats.Levels {
+ // Append metrics for additional layers
+ if i >= len(d.levelsGauge) {
+ d.levelsGauge = append(d.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil))
+ }
+ d.levelsGauge[i].Update(level.NumFiles)
+ }
// Sleep a bit, then repeat the stats collection
select {
@@ -535,7 +567,7 @@ defer b.db.quitLock.RUnlock()
if b.db.closed {
return pebble.ErrClosed
}
- return b.b.Commit(pebble.Sync)
+ return b.b.Commit(b.db.writeOptions)
}
// Reset resets the batch for reuse.
@@ -576,7 +608,7 @@ // NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
func (d *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
- iter := d.db.NewIter(&pebble.IterOptions{
+ iter, _ := d.db.NewIter(&pebble.IterOptions{
LowerBound: append(prefix, start...),
UpperBound: upperBound(prefix),
})
diff --git ethereum/go-ethereum/ethdb/pebble/pebble_test.go taikoxyz/taiko-geth/ethdb/pebble/pebble_test.go
index 590d5bf0353d0ac09aa7904455246e3e4b153710..1d5611f211e3649ee488a3fb94a2a073179a15c4 100644
--- ethereum/go-ethereum/ethdb/pebble/pebble_test.go
+++ taikoxyz/taiko-geth/ethdb/pebble/pebble_test.go
@@ -14,8 +14,6 @@ //
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-//go:build (arm64 || amd64) && !openbsd
-
package pebble
import (
diff --git ethereum/go-ethereum/graphql/graphql.go taikoxyz/taiko-geth/graphql/graphql.go
index 7aa427b458bf75f665376122b32ab738b70f6f3a..8304a64cf49f4596abdbe381d7cc2425c98e6bff 100644
--- ethereum/go-ethereum/graphql/graphql.go
+++ taikoxyz/taiko-geth/graphql/graphql.go
@@ -272,8 +272,6 @@ if tx == nil {
return hexutil.Big{}
}
switch tx.Type() {
- case types.AccessListTxType:
- return hexutil.Big(*tx.GasPrice())
case types.DynamicFeeTxType:
if block != nil {
if baseFee, _ := block.BaseFeePerGas(ctx); baseFee != nil {
@@ -312,9 +310,7 @@ if tx == nil {
return nil
}
switch tx.Type() {
- case types.AccessListTxType:
- return nil
- case types.DynamicFeeTxType:
+ case types.DynamicFeeTxType, types.BlobTxType:
return (*hexutil.Big)(tx.GasFeeCap())
default:
return nil
@@ -327,13 +323,31 @@ if tx == nil {
return nil
}
switch tx.Type() {
- case types.AccessListTxType:
- return nil
- case types.DynamicFeeTxType:
+ case types.DynamicFeeTxType, types.BlobTxType:
return (*hexutil.Big)(tx.GasTipCap())
default:
return nil
}
+}
+
+func (t *Transaction) MaxFeePerBlobGas(ctx context.Context) *hexutil.Big {
+ tx, _ := t.resolve(ctx)
+ if tx == nil {
+ return nil
+ }
+ return (*hexutil.Big)(tx.BlobGasFeeCap())
+}
+
+func (t *Transaction) BlobVersionedHashes(ctx context.Context) *[]common.Hash {
+ tx, _ := t.resolve(ctx)
+ if tx == nil {
+ return nil
+ }
+ if tx.Type() != types.BlobTxType {
+ return nil
+ }
+ blobHashes := tx.BlobHashes()
+ return &blobHashes
}
func (t *Transaction) EffectiveTip(ctx context.Context) (*hexutil.Big, error) {
@@ -466,6 +480,40 @@ return nil, err
}
ret := hexutil.Uint64(receipt.CumulativeGasUsed)
return &ret, nil
+}
+
+func (t *Transaction) BlobGasUsed(ctx context.Context) (*hexutil.Uint64, error) {
+ tx, _ := t.resolve(ctx)
+ if tx == nil {
+ return nil, nil
+ }
+ if tx.Type() != types.BlobTxType {
+ return nil, nil
+ }
+
+ receipt, err := t.getReceipt(ctx)
+ if err != nil || receipt == nil {
+ return nil, err
+ }
+ ret := hexutil.Uint64(receipt.BlobGasUsed)
+ return &ret, nil
+}
+
+func (t *Transaction) BlobGasPrice(ctx context.Context) (*hexutil.Big, error) {
+ tx, _ := t.resolve(ctx)
+ if tx == nil {
+ return nil, nil
+ }
+ if tx.Type() != types.BlobTxType {
+ return nil, nil
+ }
+
+ receipt, err := t.getReceipt(ctx)
+ if err != nil || receipt == nil {
+ return nil, err
+ }
+ ret := (*hexutil.Big)(receipt.BlobGasPrice)
+ return ret, nil
}
func (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs) (*Account, error) {
@@ -1019,6 +1067,30 @@ }
return &ret, nil
}
+func (b *Block) BlobGasUsed(ctx context.Context) (*hexutil.Uint64, error) {
+ header, err := b.resolveHeader(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if header.BlobGasUsed == nil {
+ return nil, nil
+ }
+ ret := hexutil.Uint64(*header.BlobGasUsed)
+ return &ret, nil
+}
+
+func (b *Block) ExcessBlobGas(ctx context.Context) (*hexutil.Uint64, error) {
+ header, err := b.resolveHeader(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if header.ExcessBlobGas == nil {
+ return nil, nil
+ }
+ ret := hexutil.Uint64(*header.ExcessBlobGas)
+ return &ret, nil
+}
+
// BlockFilterCriteria encapsulates criteria passed to a `logs` accessor inside
// a block.
type BlockFilterCriteria struct {
@@ -1217,6 +1289,9 @@ func (r *Resolver) Block(ctx context.Context, args struct {
Number *Long
Hash *common.Hash
}) (*Block, error) {
+ if args.Number != nil && args.Hash != nil {
+ return nil, errors.New("only one of number or hash must be specified")
+ }
var numberOrHash rpc.BlockNumberOrHash
if args.Number != nil {
if *args.Number < 0 {
diff --git ethereum/go-ethereum/graphql/schema.go taikoxyz/taiko-geth/graphql/schema.go
index 5de5bad305e85c758a94ddeac8c839602f647fcb..5738923fc17087a77869b9fc6ee9be8901186faa 100644
--- ethereum/go-ethereum/graphql/schema.go
+++ taikoxyz/taiko-geth/graphql/schema.go
@@ -71,8 +71,8 @@ # Transaction is the transaction that generated this log entry.
transaction: Transaction!
}
- #EIP-2718
- type AccessTuple{
+ # EIP-2718
+ type AccessTuple {
address: Address!
storageKeys : [Bytes32!]!
}
@@ -112,6 +112,8 @@ # MaxFeePerGas is the maximum fee per gas offered to include a transaction, in wei.
maxFeePerGas: BigInt
# MaxPriorityFeePerGas is the maximum miner tip per gas offered to include a transaction, in wei.
maxPriorityFeePerGas: BigInt
+ # MaxFeePerBlobGas is the maximum blob gas fee cap per blob the sender is willing to pay for blob transaction, in wei.
+ maxFeePerBlobGas: BigInt
# EffectiveTip is the actual amount of reward going to miner after considering the max fee cap.
effectiveTip: BigInt
# Gas is the maximum amount of gas this transaction can consume.
@@ -141,6 +143,10 @@ # maxPriorityFeePerGas). Legacy transactions and EIP-2930 transactions are
# coerced into the EIP-1559 format by setting both maxFeePerGas and
# maxPriorityFeePerGas as the transaction's gas price.
effectiveGasPrice: BigInt
+ # BlobGasUsed is the amount of blob gas used by this transaction.
+ blobGasUsed: Long
+ # blobGasPrice is the actual value per blob gas deducted from the senders account.
+ blobGasPrice: BigInt
# CreatedContract is the account that was created by a contract creation
# transaction. If the transaction was not a contract creation transaction,
# or it has not yet been mined, this field will be null.
@@ -162,6 +168,8 @@ raw: Bytes!
# RawReceipt is the canonical encoding of the receipt. For post EIP-2718 typed transactions
# this is equivalent to TxType || ReceiptEncoding.
rawReceipt: Bytes!
+ # BlobVersionedHashes is a set of hash outputs from the blobs in the transaction.
+ blobVersionedHashes: [Bytes32!]
}
# BlockFilterCriteria encapsulates log filter criteria for a filter applied
@@ -171,16 +179,16 @@ # Addresses is list of addresses that are of interest. If this list is
# empty, results will not be filtered by address.
addresses: [Address!]
# Topics list restricts matches to particular event topics. Each event has a list
- # of topics. Topics matches a prefix of that list. An empty element array matches any
- # topic. Non-empty elements represent an alternative that matches any of the
- # contained topics.
- #
- # Examples:
- # - [] or nil matches any topic list
- # - [[A]] matches topic A in first position
- # - [[], [B]] matches any topic in first position, B in second position
- # - [[A], [B]] matches topic A in first position, B in second position
- # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
+ # of topics. Topics matches a prefix of that list. An empty element array matches any
+ # topic. Non-empty elements represent an alternative that matches any of the
+ # contained topics.
+ #
+ # Examples:
+ # - [] or nil matches any topic list
+ # - [[A]] matches topic A in first position
+ # - [[], [B]] matches any topic in first position, B in second position
+ # - [[A], [B]] matches topic A in first position, B in second position
+ # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
topics: [[Bytes32!]!]
}
@@ -267,6 +275,10 @@ withdrawalsRoot: Bytes32
# Withdrawals is a list of withdrawals associated with this block. If
# withdrawals are unavailable for this block, this field will be null.
withdrawals: [Withdrawal!]
+ # BlobGasUsed is the total amount of gas used by the transactions.
+ blobGasUsed: Long
+ # ExcessBlobGas is a running total of blob gas consumed in excess of the target, prior to the block.
+ excessBlobGas: Long
}
# CallData represents the data associated with a local contract call.
@@ -312,21 +324,21 @@ # Addresses is a list of addresses that are of interest. If this list is
# empty, results will not be filtered by address.
addresses: [Address!]
# Topics list restricts matches to particular event topics. Each event has a list
- # of topics. Topics matches a prefix of that list. An empty element array matches any
- # topic. Non-empty elements represent an alternative that matches any of the
- # contained topics.
- #
- # Examples:
- # - [] or nil matches any topic list
- # - [[A]] matches topic A in first position
- # - [[], [B]] matches any topic in first position, B in second position
- # - [[A], [B]] matches topic A in first position, B in second position
- # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
+ # of topics. Topics matches a prefix of that list. An empty element array matches any
+ # topic. Non-empty elements represent an alternative that matches any of the
+ # contained topics.
+ #
+ # Examples:
+ # - [] or nil matches any topic list
+ # - [[A]] matches topic A in first position
+ # - [[], [B]] matches any topic in first position, B in second position
+ # - [[A], [B]] matches topic A in first position, B in second position
+ # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
topics: [[Bytes32!]!]
}
# SyncState contains the current synchronisation state of the client.
- type SyncState{
+ type SyncState {
# StartingBlock is the block number at which synchronisation started.
startingBlock: Long!
# CurrentBlock is the point at which synchronisation has presently reached.
@@ -337,17 +349,17 @@ }
# Pending represents the current pending state.
type Pending {
- # TransactionCount is the number of transactions in the pending state.
- transactionCount: Long!
- # Transactions is a list of transactions in the current pending state.
- transactions: [Transaction!]
- # Account fetches an Ethereum account for the pending state.
- account(address: Address!): Account!
- # Call executes a local call operation for the pending state.
- call(data: CallData!): CallResult
- # EstimateGas estimates the amount of gas that will be required for
- # successful execution of a transaction for the pending state.
- estimateGas(data: CallData!): Long!
+ # TransactionCount is the number of transactions in the pending state.
+ transactionCount: Long!
+ # Transactions is a list of transactions in the current pending state.
+ transactions: [Transaction!]
+ # Account fetches an Ethereum account for the pending state.
+ account(address: Address!): Account!
+ # Call executes a local call operation for the pending state.
+ call(data: CallData!): CallResult
+ # EstimateGas estimates the amount of gas that will be required for
+ # successful execution of a transaction for the pending state.
+ estimateGas(data: CallData!): Long!
}
type Query {
diff --git ethereum/go-ethereum/graphql/service.go taikoxyz/taiko-geth/graphql/service.go
index 4ca427658a94b3677cd6f73104edf92e4836a460..f33e763058e33e7988eb02a56ab06f9ee12d841c 100644
--- ethereum/go-ethereum/graphql/service.go
+++ taikoxyz/taiko-geth/graphql/service.go
@@ -88,7 +88,9 @@ })
}
response := h.Schema.Exec(ctx, params.Query, params.OperationName, params.Variables)
- timer.Stop()
+ if timer != nil {
+ timer.Stop()
+ }
responded.Do(func() {
responseJSON, err := json.Marshal(response)
if err != nil {
diff --git ethereum/go-ethereum/internal/build/azure.go taikoxyz/taiko-geth/internal/build/azure.go
index 9d1c4f300a88f3d439f15e41273b763227c19e77..4085228d14e38b28444fbc0d4bf5f36140d36cd2 100644
--- ethereum/go-ethereum/internal/build/azure.go
+++ taikoxyz/taiko-geth/internal/build/azure.go
@@ -22,6 +22,7 @@ "fmt"
"os"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)
// AzureBlobstoreConfig is an authentication and configuration struct containing
@@ -48,8 +49,8 @@ credential, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
if err != nil {
return err
}
- u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
- container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+ a := fmt.Sprintf("https://%s.blob.core.windows.net/", config.Account)
+ client, err := azblob.NewClientWithSharedKeyCredential(a, credential, nil)
if err != nil {
return err
}
@@ -60,38 +61,38 @@ return err
}
defer in.Close()
- blockblob := container.NewBlockBlobClient(name)
- _, err = blockblob.Upload(context.Background(), in, nil)
+ _, err = client.UploadFile(context.Background(), config.Container, name, in, nil)
return err
}
// AzureBlobstoreList lists all the files contained within an azure blobstore.
-func AzureBlobstoreList(config AzureBlobstoreConfig) ([]*azblob.BlobItemInternal, error) {
+func AzureBlobstoreList(config AzureBlobstoreConfig) ([]*container.BlobItem, error) {
// Create an authenticated client against the Azure cloud
credential, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
if err != nil {
return nil, err
}
- u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
- container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+ a := fmt.Sprintf("https://%s.blob.core.windows.net/", config.Account)
+ client, err := azblob.NewClientWithSharedKeyCredential(a, credential, nil)
if err != nil {
return nil, err
}
- var maxResults int32 = 5000
- pager := container.ListBlobsFlat(&azblob.ContainerListBlobFlatSegmentOptions{
- Maxresults: &maxResults,
- })
- var allBlobs []*azblob.BlobItemInternal
- for pager.NextPage(context.Background()) {
- res := pager.PageResponse()
- allBlobs = append(allBlobs, res.ContainerListBlobFlatSegmentResult.Segment.BlobItems...)
+ pager := client.NewListBlobsFlatPager(config.Container, nil)
+
+ var blobs []*container.BlobItem
+ for pager.More() {
+ page, err := pager.NextPage(context.TODO())
+ if err != nil {
+ return nil, err
+ }
+ blobs = append(blobs, page.Segment.BlobItems...)
}
- return allBlobs, pager.Err()
+ return blobs, nil
}
// AzureBlobstoreDelete iterates over a list of files to delete and removes them
// from the blobstore.
-func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []*azblob.BlobItemInternal) error {
+func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []*container.BlobItem) error {
if *DryRunFlag {
for _, blob := range blobs {
fmt.Printf("would delete %s (%s) from %s/%s\n", *blob.Name, blob.Properties.LastModified, config.Account, config.Container)
@@ -103,15 +104,14 @@ credential, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
if err != nil {
return err
}
- u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
- container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+ a := fmt.Sprintf("https://%s.blob.core.windows.net/", config.Account)
+ client, err := azblob.NewClientWithSharedKeyCredential(a, credential, nil)
if err != nil {
return err
}
// Iterate over the blobs and delete them
for _, blob := range blobs {
- blockblob := container.NewBlockBlobClient(*blob.Name)
- if _, err := blockblob.Delete(context.Background(), &azblob.DeleteBlobOptions{}); err != nil {
+ if _, err := client.DeleteBlob(context.Background(), config.Container, *blob.Name, nil); err != nil {
return err
}
fmt.Printf("deleted %s (%s)\n", *blob.Name, blob.Properties.LastModified)
diff --git ethereum/go-ethereum/internal/build/gotool.go taikoxyz/taiko-geth/internal/build/gotool.go
index 296ba8c36ecb050a269af4a498abc802c15b5618..32ca20e869a54251f2d9150a5a06536fdcba6e0c 100644
--- ethereum/go-ethereum/internal/build/gotool.go
+++ taikoxyz/taiko-geth/internal/build/gotool.go
@@ -84,7 +84,11 @@ }
// DownloadGo downloads the Go binary distribution and unpacks it into a temporary
// directory. It returns the GOROOT of the unpacked toolchain.
-func DownloadGo(csdb *ChecksumDB, version string) string {
+func DownloadGo(csdb *ChecksumDB) string {
+ version, err := Version(csdb, "golang")
+ if err != nil {
+ log.Fatal(err)
+ }
// Shortcut: if the Go version that runs this script matches the
// requested version exactly, there is no need to download anything.
activeGo := strings.TrimPrefix(runtime.Version(), "go")
@@ -126,3 +130,52 @@ log.Fatal(err)
}
return goroot
}
+
+// Version returns the version defined in the checksumdb for the given key.
+func Version(csdb *ChecksumDB, version string) (string, error) {
+ for _, l := range csdb.allChecksums {
+ if !strings.HasPrefix(l, "# version:") {
+ continue
+ }
+ v := strings.Split(l, ":")[1]
+ parts := strings.Split(v, " ")
+ if len(parts) != 2 {
+ log.Print("Erroneous version-string", "v", l)
+ continue
+ }
+ if parts[0] == version {
+ log.Printf("Found version %q", parts[1])
+ return parts[1], nil
+ }
+ }
+ return "", fmt.Errorf("no version found for '%v'", version)
+}
+
+// DownloadAndVerifyChecksums downloads all files and checks that they match
+// the checksum given in checksums.txt.
+// This task can be used to sanity-check new checksums.
+func DownloadAndVerifyChecksums(csdb *ChecksumDB) {
+ var (
+ base = ""
+ ucache = os.TempDir()
+ )
+ for _, l := range csdb.allChecksums {
+ if strings.HasPrefix(l, "# https://") {
+ base = l[2:]
+ continue
+ }
+ if strings.HasPrefix(l, "#") {
+ continue
+ }
+ hashFile := strings.Split(l, " ")
+ if len(hashFile) != 2 {
+ continue
+ }
+ file := hashFile[1]
+ url := base + file
+ dst := filepath.Join(ucache, file)
+ if err := csdb.DownloadFile(url, dst); err != nil {
+ log.Print(err)
+ }
+ }
+}
diff --git ethereum/go-ethereum/internal/debug/flags.go taikoxyz/taiko-geth/internal/debug/flags.go
index 52a6342452a17ea09b9311cfd03fa2454b4188cf..736fede9433499a7dc02741a7a28e9aa53aabdc7 100644
--- ethereum/go-ethereum/internal/debug/flags.go
+++ taikoxyz/taiko-geth/internal/debug/flags.go
@@ -87,8 +87,9 @@ Usage: "Prepends log messages with call-site location (file and line number)",
Category: flags.LoggingCategory,
}
logRotateFlag = &cli.BoolFlag{
- Name: "log.rotate",
- Usage: "Enables log file rotation",
+ Name: "log.rotate",
+ Usage: "Enables log file rotation",
+ Category: flags.LoggingCategory,
}
logMaxSizeMBsFlag = &cli.IntFlag{
Name: "log.maxsize",
diff --git ethereum/go-ethereum/internal/debug/loudpanic.go taikoxyz/taiko-geth/internal/debug/loudpanic.go
index 86e6bc88f83f2df82e60975aa967c29674614121..a7296e7b3f334b1a1d1a064177469323875610db 100644
--- ethereum/go-ethereum/internal/debug/loudpanic.go
+++ taikoxyz/taiko-geth/internal/debug/loudpanic.go
@@ -14,9 +14,6 @@ //
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-//go:build go1.6
-// +build go1.6
-
package debug
import "runtime/debug"
diff --git ethereum/go-ethereum/internal/debug/loudpanic_fallback.go taikoxyz/taiko-geth/internal/debug/loudpanic_fallback.go
deleted file mode 100644
index 377490e5bee571fa18bd97e8bb9502a42990fe46..0000000000000000000000000000000000000000
--- ethereum/go-ethereum/internal/debug/loudpanic_fallback.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-//go:build !go1.6
-// +build !go1.6
-
-package debug
-
-// LoudPanic panics in a way that gets all goroutine stacks printed on stderr.
-func LoudPanic(x interface{}) {
- panic(x)
-}
diff --git ethereum/go-ethereum/internal/debug/trace.go taikoxyz/taiko-geth/internal/debug/trace.go
index eea879823429b3107bac2d0e3e87a006f6cc9cb4..e291030b82e32bac23eaa1579add8350cb50c853 100644
--- ethereum/go-ethereum/internal/debug/trace.go
+++ taikoxyz/taiko-geth/internal/debug/trace.go
@@ -14,9 +14,6 @@ //
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-//go:build go1.5
-// +build go1.5
-
package debug
import (
diff --git ethereum/go-ethereum/internal/debug/trace_fallback.go taikoxyz/taiko-geth/internal/debug/trace_fallback.go
deleted file mode 100644
index ec07d991efd62d43a0164d9a0c26723c4d7cb0e0..0000000000000000000000000000000000000000
--- ethereum/go-ethereum/internal/debug/trace_fallback.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-//go:build !go1.5
-// +build !go1.5
-
-// no-op implementation of tracing methods for Go < 1.5.
-
-package debug
-
-import "errors"
-
-func (*HandlerT) StartGoTrace(string) error {
- return errors.New("tracing is not supported on Go < 1.5")
-}
-
-func (*HandlerT) StopGoTrace() error {
- return errors.New("tracing is not supported on Go < 1.5")
-}
diff --git ethereum/go-ethereum/internal/ethapi/api.go taikoxyz/taiko-geth/internal/ethapi/api.go
index 52dbaf164cd79c0e6799772adb2a8019149c2ba5..640693132e2bb6c34bcee6bfdce1044b21481834 100644
--- ethereum/go-ethereum/internal/ethapi/api.go
+++ taikoxyz/taiko-geth/internal/ethapi/api.go
@@ -46,6 +46,7 @@ "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/tyler-smith/go-bip39"
)
@@ -674,9 +675,6 @@ var (
keys = make([]common.Hash, len(storageKeys))
keyLengths = make([]int, len(storageKeys))
storageProof = make([]StorageResult, len(storageKeys))
- storageTrie state.Trie
- storageHash = types.EmptyRootHash
- codeHash = types.EmptyCodeHash
)
// Deserialize all keys. This prevents state access on invalid input.
for i, hexKey := range storageKeys {
@@ -686,61 +684,65 @@ if err != nil {
return nil, err
}
}
-
- state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
- if state == nil || err != nil {
- return nil, err
- }
- if storageTrie, err = state.StorageTrie(address); err != nil {
+ statedb, header, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+ if statedb == nil || err != nil {
return nil, err
}
+ codeHash := statedb.GetCodeHash(address)
+ storageRoot := statedb.GetStorageRoot(address)
- // If we have a storageTrie, the account exists and we must update
- // the storage root hash and the code hash.
- if storageTrie != nil {
- storageHash = storageTrie.Hash()
- codeHash = state.GetCodeHash(address)
- }
- // Create the proofs for the storageKeys.
- for i, key := range keys {
- // Output key encoding is a bit special: if the input was a 32-byte hash, it is
- // returned as such. Otherwise, we apply the QUANTITY encoding mandated by the
- // JSON-RPC spec for getProof. This behavior exists to preserve backwards
- // compatibility with older client versions.
- var outputKey string
- if keyLengths[i] != 32 {
- outputKey = hexutil.EncodeBig(key.Big())
- } else {
- outputKey = hexutil.Encode(key[:])
+ if len(keys) > 0 {
+ var storageTrie state.Trie
+ if storageRoot != types.EmptyRootHash && storageRoot != (common.Hash{}) {
+ id := trie.StorageTrieID(header.Root, crypto.Keccak256Hash(address.Bytes()), storageRoot)
+ st, err := trie.NewStateTrie(id, statedb.Database().TrieDB())
+ if err != nil {
+ return nil, err
+ }
+ storageTrie = st
}
-
- if storageTrie == nil {
- storageProof[i] = StorageResult{outputKey, &hexutil.Big{}, []string{}}
- continue
+ // Create the proofs for the storageKeys.
+ for i, key := range keys {
+ // Output key encoding is a bit special: if the input was a 32-byte hash, it is
+ // returned as such. Otherwise, we apply the QUANTITY encoding mandated by the
+ // JSON-RPC spec for getProof. This behavior exists to preserve backwards
+ // compatibility with older client versions.
+ var outputKey string
+ if keyLengths[i] != 32 {
+ outputKey = hexutil.EncodeBig(key.Big())
+ } else {
+ outputKey = hexutil.Encode(key[:])
+ }
+ if storageTrie == nil {
+ storageProof[i] = StorageResult{outputKey, &hexutil.Big{}, []string{}}
+ continue
+ }
+ var proof proofList
+ if err := storageTrie.Prove(crypto.Keccak256(key.Bytes()), &proof); err != nil {
+ return nil, err
+ }
+ value := (*hexutil.Big)(statedb.GetState(address, key).Big())
+ storageProof[i] = StorageResult{outputKey, value, proof}
}
- var proof proofList
- if err := storageTrie.Prove(crypto.Keccak256(key.Bytes()), &proof); err != nil {
- return nil, err
- }
- value := (*hexutil.Big)(state.GetState(address, key).Big())
- storageProof[i] = StorageResult{outputKey, value, proof}
}
-
// Create the accountProof.
- accountProof, proofErr := state.GetProof(address)
- if proofErr != nil {
- return nil, proofErr
+ tr, err := trie.NewStateTrie(trie.StateTrieID(header.Root), statedb.Database().TrieDB())
+ if err != nil {
+ return nil, err
+ }
+ var accountProof proofList
+ if err := tr.Prove(crypto.Keccak256(address.Bytes()), &accountProof); err != nil {
+ return nil, err
}
-
return &AccountResult{
Address: address,
- AccountProof: toHexSlice(accountProof),
- Balance: (*hexutil.Big)(state.GetBalance(address)),
+ AccountProof: accountProof,
+ Balance: (*hexutil.Big)(statedb.GetBalance(address)),
CodeHash: codeHash,
- Nonce: hexutil.Uint64(state.GetNonce(address)),
- StorageHash: storageHash,
+ Nonce: hexutil.Uint64(statedb.GetNonce(address)),
+ StorageHash: storageRoot,
StorageProof: storageProof,
- }, state.Error()
+ }, statedb.Error()
}
// decodeHash parses a hex-encoded 32-byte hash. The input may optionally
@@ -897,6 +899,34 @@ res := state.GetState(address, key)
return res[:], state.Error()
}
+// GetBlockReceipts returns the block receipts for the given block hash or number or tag.
+func (s *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) {
+ block, err := s.b.BlockByNumberOrHash(ctx, blockNrOrHash)
+ if block == nil || err != nil {
+ // When the block doesn't exist, the RPC method should return JSON null
+ // as per specification.
+ return nil, nil
+ }
+ receipts, err := s.b.GetReceipts(ctx, block.Hash())
+ if err != nil {
+ return nil, err
+ }
+ txs := block.Transactions()
+ if len(txs) != len(receipts) {
+ return nil, fmt.Errorf("receipts length mismatch: %d vs %d", len(txs), len(receipts))
+ }
+
+ // Derive the sender.
+ signer := types.MakeSigner(s.b.ChainConfig(), block.Number(), block.Time())
+
+ result := make([]map[string]interface{}, len(receipts))
+ for i, receipt := range receipts {
+ result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i)
+ }
+
+ return result, nil
+}
+
// OverrideAccount indicates the overriding fields of account during the execution
// of a message call.
// Note, state and stateDiff can't be specified at the same time. If state is
@@ -955,13 +985,14 @@ }
// BlockOverrides is a set of header fields to override.
type BlockOverrides struct {
- Number *hexutil.Big
- Difficulty *hexutil.Big
- Time *hexutil.Uint64
- GasLimit *hexutil.Uint64
- Coinbase *common.Address
- Random *common.Hash
- BaseFee *hexutil.Big
+ Number *hexutil.Big
+ Difficulty *hexutil.Big
+ Time *hexutil.Uint64
+ GasLimit *hexutil.Uint64
+ Coinbase *common.Address
+ Random *common.Hash
+ BaseFee *hexutil.Big
+ BlobBaseFee *hexutil.Big
}
// Apply overrides the given header fields into the given block context.
@@ -989,6 +1020,9 @@ blockCtx.Random = diff.Random
}
if diff.BaseFee != nil {
blockCtx.BaseFee = diff.BaseFee.ToInt()
+ }
+ if diff.BlobBaseFee != nil {
+ blockCtx.BlobBaseFee = diff.BlobBaseFee.ToInt()
}
}
@@ -1122,8 +1156,12 @@ // Additionally, the caller can specify a batch of contract for fields overriding.
//
// Note, this function doesn't make and changes in the state/blockchain and is
// useful to execute and retrieve values.
-func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) {
- result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap())
+func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) {
+ if blockNrOrHash == nil {
+ latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
+ blockNrOrHash = &latest
+ }
+ result, err := DoCall(ctx, s.b, args, *blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap())
if err != nil {
return nil, err
}
@@ -1134,12 +1172,30 @@ }
return result.Return(), result.Err
}
+// executeEstimate is a helper that executes the transaction under a given gas limit and returns
+// true if the transaction fails for a reason that might be related to not enough gas. A non-nil
+// error means execution failed due to reasons unrelated to the gas limit.
+func executeEstimate(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, gasCap uint64, gasLimit uint64) (bool, *core.ExecutionResult, error) {
+ args.Gas = (*hexutil.Uint64)(&gasLimit)
+ result, err := doCall(ctx, b, args, state, header, nil, nil, 0, gasCap)
+ if err != nil {
+ if errors.Is(err, core.ErrIntrinsicGas) {
+ return true, nil, nil // Special case, raise gas limit
+ }
+ return true, nil, err // Bail out
+ }
+ return result.Failed(), result, nil
+}
+
+// DoEstimateGas returns the lowest possible gas limit that allows the transaction to run
+// successfully at block `blockNrOrHash`. It returns an error if the transaction would revert, or if
+// there are unexpected failures. The gas limit is capped by both `args.Gas` (if non-nil &
+// non-zero) and `gasCap` (if non-zero).
func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, gasCap uint64) (hexutil.Uint64, error) {
- // Binary search the gas requirement, as it may be higher than the amount used
+ // Binary search the gas limit, as it may need to be higher than the amount used
var (
- lo uint64 = params.TxGas - 1
- hi uint64
- cap uint64
+ lo uint64 // lowest-known gas limit where tx execution fails
+ hi uint64 // lowest-known gas limit where tx execution succeeds
)
// Use zero address if sender unspecified.
if args.From == nil {
@@ -1170,16 +1226,17 @@ feeCap = args.MaxFeePerGas.ToInt()
} else {
feeCap = common.Big0
}
+
+ state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+ if state == nil || err != nil {
+ return 0, err
+ }
+ if err := overrides.Apply(state); err != nil {
+ return 0, err
+ }
+
// Recap the highest gas limit with account's available balance.
if feeCap.BitLen() != 0 {
- state, _, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
- if err != nil {
- return 0, err
- }
- err = overrides.Apply(state)
- if err != nil {
- return 0, err
- }
balance := state.GetBalance(*args.From) // from can't be nil
available := new(big.Int).Set(balance)
if args.Value != nil {
@@ -1206,39 +1263,42 @@ if gasCap != 0 && hi > gasCap {
log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
hi = gasCap
}
- cap = hi
- // Create a helper to check if a gas allowance results in an executable transaction
- executable := func(gas uint64, state *state.StateDB, header *types.Header) (bool, *core.ExecutionResult, error) {
- args.Gas = (*hexutil.Uint64)(&gas)
-
- result, err := doCall(ctx, b, args, state, header, nil, nil, 0, gasCap)
- if err != nil {
- if errors.Is(err, core.ErrIntrinsicGas) {
- return true, nil, nil // Special case, raise gas limit
+ // We first execute the transaction at the highest allowable gas limit, since if this fails we
+ // can return error immediately.
+ failed, result, err := executeEstimate(ctx, b, args, state.Copy(), header, gasCap, hi)
+ if err != nil {
+ return 0, err
+ }
+ if failed {
+ if result != nil && result.Err != vm.ErrOutOfGas {
+ if len(result.Revert()) > 0 {
+ return 0, newRevertError(result)
}
- return true, nil, err // Bail out
+ return 0, result.Err
}
- return result.Failed(), result, nil
+ return 0, fmt.Errorf("gas required exceeds allowance (%d)", hi)
}
- state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
- if state == nil || err != nil {
- return 0, err
- }
- err = overrides.Apply(state)
- if err != nil {
- return 0, err
- }
- // Execute the binary search and hone in on an executable gas limit
+ // For almost any transaction, the gas consumed by the unconstrained execution above
+ // lower-bounds the gas limit required for it to succeed. One exception is those txs that
+ // explicitly check gas remaining in order to successfully execute within a given limit, but we
+ // probably don't want to return a lowest possible gas limit for these cases anyway.
+ lo = result.UsedGas - 1
+
+ // Binary search for the smallest gas limit that allows the tx to execute successfully.
for lo+1 < hi {
- s := state.Copy()
mid := (hi + lo) / 2
- failed, _, err := executable(mid, s, header)
-
- // If the error is not nil(consensus error), it means the provided message
- // call or transaction will never be accepted no matter how much gas it is
- // assigned. Return the error directly, don't struggle any more.
+ if mid > lo*2 {
+ // Most txs don't need much higher gas limit than their gas used, and most txs don't
+ // require near the full block limit of gas, so the selection of where to bisect the
+ // range here is skewed to favor the low side.
+ mid = lo * 2
+ }
+ failed, _, err = executeEstimate(ctx, b, args, state.Copy(), header, gasCap, mid)
if err != nil {
+ // This should not happen under normal conditions since if we make it this far the
+ // transaction had run without error at least once before.
+ log.Error("execution error in estimate gas", "err", err)
return 0, err
}
if failed {
@@ -1247,28 +1307,14 @@ } else {
hi = mid
}
}
- // Reject the transaction as invalid if it still fails at the highest allowance
- if hi == cap {
- failed, result, err := executable(hi, state, header)
- if err != nil {
- return 0, err
- }
- if failed {
- if result != nil && result.Err != vm.ErrOutOfGas {
- if len(result.Revert()) > 0 {
- return 0, newRevertError(result)
- }
- return 0, result.Err
- }
- // Otherwise, the specified gas cap is too low
- return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap)
- }
- }
return hexutil.Uint64(hi), nil
}
-// EstimateGas returns an estimate of the amount of gas needed to execute the
-// given transaction against the current pending block.
+// EstimateGas returns the lowest possible gas limit that allows the transaction to run
+// successfully at block `blockNrOrHash`, or the latest block if `blockNrOrHash` is unspecified. It
+// returns an error if the transaction would revert or if there are unexpected failures. The returned
+// value is capped by both `args.Gas` (if non-nil & non-zero) and the backend's RPCGasCap
+// configuration (if non-zero).
func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Uint64, error) {
bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
if blockNrOrHash != nil {
@@ -1297,15 +1343,21 @@ "timestamp": hexutil.Uint64(head.Time),
"transactionsRoot": head.TxHash,
"receiptsRoot": head.ReceiptHash,
}
-
if head.BaseFee != nil {
result["baseFeePerGas"] = (*hexutil.Big)(head.BaseFee)
}
-
if head.WithdrawalsHash != nil {
result["withdrawalsRoot"] = head.WithdrawalsHash
}
-
+ if head.BlobGasUsed != nil {
+ result["blobGasUsed"] = hexutil.Uint64(*head.BlobGasUsed)
+ }
+ if head.ExcessBlobGas != nil {
+ result["excessBlobGas"] = hexutil.Uint64(*head.ExcessBlobGas)
+ }
+ if head.ParentBeaconRoot != nil {
+ result["parentBeaconBlockRoot"] = head.ParentBeaconRoot
+ }
return result
}
@@ -1364,26 +1416,28 @@ }
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
type RPCTransaction struct {
- BlockHash *common.Hash `json:"blockHash"`
- BlockNumber *hexutil.Big `json:"blockNumber"`
- From common.Address `json:"from"`
- Gas hexutil.Uint64 `json:"gas"`
- GasPrice *hexutil.Big `json:"gasPrice"`
- GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"`
- GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"`
- Hash common.Hash `json:"hash"`
- Input hexutil.Bytes `json:"input"`
- Nonce hexutil.Uint64 `json:"nonce"`
- To *common.Address `json:"to"`
- TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
- Value *hexutil.Big `json:"value"`
- Type hexutil.Uint64 `json:"type"`
- Accesses *types.AccessList `json:"accessList,omitempty"`
- ChainID *hexutil.Big `json:"chainId,omitempty"`
- V *hexutil.Big `json:"v"`
- R *hexutil.Big `json:"r"`
- S *hexutil.Big `json:"s"`
- YParity *hexutil.Uint64 `json:"yParity,omitempty"`
+ BlockHash *common.Hash `json:"blockHash"`
+ BlockNumber *hexutil.Big `json:"blockNumber"`
+ From common.Address `json:"from"`
+ Gas hexutil.Uint64 `json:"gas"`
+ GasPrice *hexutil.Big `json:"gasPrice"`
+ GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"`
+ GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"`
+ MaxFeePerBlobGas *hexutil.Big `json:"maxFeePerBlobGas,omitempty"`
+ Hash common.Hash `json:"hash"`
+ Input hexutil.Bytes `json:"input"`
+ Nonce hexutil.Uint64 `json:"nonce"`
+ To *common.Address `json:"to"`
+ TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
+ Value *hexutil.Big `json:"value"`
+ Type hexutil.Uint64 `json:"type"`
+ Accesses *types.AccessList `json:"accessList,omitempty"`
+ ChainID *hexutil.Big `json:"chainId,omitempty"`
+ BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
+ V *hexutil.Big `json:"v"`
+ R *hexutil.Big `json:"r"`
+ S *hexutil.Big `json:"s"`
+ YParity *hexutil.Uint64 `json:"yParity,omitempty"`
}
// newRPCTransaction returns a transaction that will serialize to the RPC
@@ -1437,15 +1491,43 @@ result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
// if the transaction has been mined, compute the effective gas price
if baseFee != nil && blockHash != (common.Hash{}) {
// price = min(gasTipCap + baseFee, gasFeeCap)
- price := math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap())
- result.GasPrice = (*hexutil.Big)(price)
+ result.GasPrice = (*hexutil.Big)(effectiveGasPrice(tx, baseFee))
} else {
result.GasPrice = (*hexutil.Big)(tx.GasFeeCap())
}
+
+ case types.BlobTxType:
+ al := tx.AccessList()
+ yparity := hexutil.Uint64(v.Sign())
+ result.Accesses = &al
+ result.ChainID = (*hexutil.Big)(tx.ChainId())
+ result.YParity = &yparity
+ result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
+ result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
+ // if the transaction has been mined, compute the effective gas price
+ if baseFee != nil && blockHash != (common.Hash{}) {
+ result.GasPrice = (*hexutil.Big)(effectiveGasPrice(tx, baseFee))
+ } else {
+ result.GasPrice = (*hexutil.Big)(tx.GasFeeCap())
+ }
+ result.MaxFeePerBlobGas = (*hexutil.Big)(tx.BlobGasFeeCap())
+ result.BlobVersionedHashes = tx.BlobHashes()
}
return result
}
+// effectiveGasPrice computes the transaction gas fee, based on the given basefee value.
+//
+// price = min(gasTipCap + baseFee, gasFeeCap)
+func effectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.Int {
+ fee := tx.GasTipCap()
+ fee = fee.Add(fee, baseFee)
+ if tx.GasFeeCapIntCmp(fee) < 0 {
+ return tx.GasFeeCap()
+ }
+ return fee
+}
+
// NewRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation
func NewRPCPendingTransaction(tx *types.Transaction, current *types.Header, config *params.ChainConfig) *RPCTransaction {
var (
@@ -1717,13 +1799,18 @@ receipt := receipts[index]
// Derive the sender.
signer := types.MakeSigner(s.b.ChainConfig(), header.Number, header.Time)
+ return marshalReceipt(receipt, blockHash, blockNumber, signer, tx, int(index)), nil
+}
+
+// marshalReceipt marshals a transaction receipt into a JSON object.
+func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} {
from, _ := types.Sender(signer, tx)
fields := map[string]interface{}{
"blockHash": blockHash,
"blockNumber": hexutil.Uint64(blockNumber),
- "transactionHash": hash,
- "transactionIndex": hexutil.Uint64(index),
+ "transactionHash": tx.Hash(),
+ "transactionIndex": hexutil.Uint64(txIndex),
"from": from,
"to": tx.To(),
"gasUsed": hexutil.Uint64(receipt.GasUsed),
@@ -1745,11 +1832,16 @@ if receipt.Logs == nil {
fields["logs"] = []*types.Log{}
}
+ if tx.Type() == types.BlobTxType {
+ fields["blobGasUsed"] = hexutil.Uint64(receipt.BlobGasUsed)
+ fields["blobGasPrice"] = (*hexutil.Big)(receipt.BlobGasPrice)
+ }
+
// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
if receipt.ContractAddress != (common.Address{}) {
fields["contractAddress"] = receipt.ContractAddress
}
- return fields, nil
+ return fields
}
// sign is a helper function that signs a transaction with the private key of the given address.
@@ -2097,20 +2189,23 @@ }
// ChaindbProperty returns leveldb properties of the key-value database.
func (api *DebugAPI) ChaindbProperty(property string) (string, error) {
- if property == "" {
- property = "leveldb.stats"
- } else if !strings.HasPrefix(property, "leveldb.") {
- property = "leveldb." + property
- }
return api.b.ChainDb().Stat(property)
}
// ChaindbCompact flattens the entire key-value database into a single level,
// removing all unused slots and merging all keys.
func (api *DebugAPI) ChaindbCompact() error {
- for b := byte(0); b < 255; b++ {
- log.Info("Compacting chain database", "range", fmt.Sprintf("0x%0.2X-0x%0.2X", b, b+1))
- if err := api.b.ChainDb().Compact([]byte{b}, []byte{b + 1}); err != nil {
+ cstart := time.Now()
+ for b := 0; b <= 255; b++ {
+ var (
+ start = []byte{byte(b)}
+ end = []byte{byte(b + 1)}
+ )
+ if b == 255 {
+ end = nil
+ }
+ log.Info("Compacting database", "range", fmt.Sprintf("%#X-%#X", start, end), "elapsed", common.PrettyDuration(time.Since(cstart)))
+ if err := api.b.ChainDb().Compact(start, end); err != nil {
log.Error("Database compaction failed", "err", err)
return err
}
@@ -2163,12 +2258,3 @@ return fmt.Errorf("tx fee (%.2f ether) exceeds the configured cap (%.2f ether)", feeFloat, cap)
}
return nil
}
-
-// toHexSlice creates a slice of hex-strings based on []byte.
-func toHexSlice(b [][]byte) []string {
- r := make([]string, len(b))
- for i := range b {
- r[i] = hexutil.Encode(b[i])
- }
- return r
-}
diff --git ethereum/go-ethereum/internal/ethapi/api_test.go taikoxyz/taiko-geth/internal/ethapi/api_test.go
index b0877dc37849fab709a8d3b1d07ea0864592a37e..59882cd6bb5438040e8aa0fed227001cd93cb306 100644
--- ethereum/go-ethereum/internal/ethapi/api_test.go
+++ taikoxyz/taiko-geth/internal/ethapi/api_test.go
@@ -23,6 +23,8 @@ "encoding/json"
"errors"
"fmt"
"math/big"
+ "os"
+ "path/filepath"
"reflect"
"testing"
"time"
@@ -32,6 +34,7 @@ "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
@@ -45,19 +48,18 @@ "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/blocktest"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/stretchr/testify/assert"
+ "github.com/holiman/uint256"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
)
-func TestTransaction_RoundTripRpcJSON(t *testing.T) {
+func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainConfig) {
+ t.Parallel()
var (
- config = params.AllEthashProtocolChanges
signer = types.LatestSigner(config)
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- tests = allTransactionTypes(common.Address{0xde, 0xad}, config)
)
- t.Parallel()
+
for i, tt := range tests {
var tx2 types.Transaction
tx, err := types.SignNewTx(key, signer, tt.Tx)
@@ -86,6 +88,23 @@ want, have := tt.Want, string(data)
require.JSONEqf(t, want, have, "test %d: rpc json not match, want %s have %s", i, want, have)
}
}
+}
+
+func TestTransaction_RoundTripRpcJSON(t *testing.T) {
+ var (
+ config = params.AllEthashProtocolChanges
+ tests = allTransactionTypes(common.Address{0xde, 0xad}, config)
+ )
+ testTransactionMarshal(t, tests, config)
+}
+
+func TestTransactionBlobTx(t *testing.T) {
+ config := *params.TestChainConfig
+ config.ShanghaiTime = new(uint64)
+ config.CancunTime = new(uint64)
+ tests := allBlobTxs(common.Address{0xde, 0xad}, &config)
+
+ testTransactionMarshal(t, tests, &config)
}
type txData struct {
@@ -338,15 +357,60 @@ },
}
}
+func allBlobTxs(addr common.Address, config *params.ChainConfig) []txData {
+ return []txData{
+ {
+ Tx: &types.BlobTx{
+ Nonce: 6,
+ GasTipCap: uint256.NewInt(1),
+ GasFeeCap: uint256.NewInt(5),
+ Gas: 6,
+ To: addr,
+ BlobFeeCap: uint256.NewInt(1),
+ BlobHashes: []common.Hash{{1}},
+ Value: new(uint256.Int),
+ V: uint256.NewInt(32),
+ R: uint256.NewInt(10),
+ S: uint256.NewInt(11),
+ },
+ Want: `{
+ "blockHash": null,
+ "blockNumber": null,
+ "from": "0x71562b71999873db5b286df957af199ec94617f7",
+ "gas": "0x6",
+ "gasPrice": "0x5",
+ "maxFeePerGas": "0x5",
+ "maxPriorityFeePerGas": "0x1",
+ "maxFeePerBlobGas": "0x1",
+ "hash": "0x1f2b59a20e61efc615ad0cbe936379d6bbea6f938aafaf35eb1da05d8e7f46a3",
+ "input": "0x",
+ "nonce": "0x6",
+ "to": "0xdead000000000000000000000000000000000000",
+ "transactionIndex": null,
+ "value": "0x0",
+ "type": "0x3",
+ "accessList": [],
+ "chainId": "0x1",
+ "blobVersionedHashes": [
+ "0x0100000000000000000000000000000000000000000000000000000000000000"
+ ],
+ "v": "0x0",
+ "r": "0x618be8908e0e5320f8f3b48042a079fe5a335ebd4ed1422a7d2207cd45d872bc",
+ "s": "0x27b2bc6c80e849a8e8b764d4549d8c2efac3441e73cf37054eb0a9b9f8e89b27",
+ "yParity": "0x0"
+ }`,
+ },
+ }
+}
+
type testBackend struct {
db ethdb.Database
chain *core.BlockChain
pending *types.Block
}
-func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend {
+func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.Engine, generator func(i int, b *core.BlockGen)) *testBackend {
var (
- engine = ethash.NewFaker()
cacheConfig = &core.CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
@@ -550,7 +614,7 @@ genBlocks = 10
signer = types.HomesteadSigner{}
randomAccounts = newAccounts(2)
)
- api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+ api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) {
// Transfer from account[0] to account[1]
// value: 1000 wei
// fee: 0 wei
@@ -653,7 +717,7 @@ }
genBlocks = 10
signer = types.HomesteadSigner{}
)
- api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+ api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) {
// Transfer from account[0] to account[1]
// value: 1000 wei
// fee: 0 wei
@@ -782,7 +846,7 @@ want: "0x000000000000000000000000000000000000000000000000000000000000000b",
},
}
for i, tc := range testSuite {
- result, err := api.Call(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides)
+ result, err := api.Call(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides)
if tc.expectErr != nil {
if err == nil {
t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr)
@@ -1036,7 +1100,7 @@ if err != nil {
t.Errorf("test %d: json marshal error: %v", i, err)
continue
}
- assert.JSONEqf(t, tc.want, string(out), "test %d", i)
+ require.JSONEqf(t, tc.want, string(out), "test %d", i)
}
}
@@ -1074,7 +1138,7 @@ Amount: 10,
}
pending = types.NewBlockWithWithdrawals(&types.Header{Number: big.NewInt(11), Time: 42}, []*types.Transaction{tx}, nil, nil, []*types.Withdrawal{withdrawal}, blocktest.NewHasher())
)
- backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+ backend := newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) {
// Transfer from account[0] to account[1]
// value: 1000 wei
// fee: 0 wei
@@ -1099,628 +1163,156 @@ blockNumber rpc.BlockNumber
blockHash *common.Hash
fullTx bool
reqHeader bool
- want string
+ file string
expectErr error
}{
// 0. latest header
{
blockNumber: rpc.LatestBlockNumber,
reqHeader: true,
- want: `{
- "baseFeePerGas": "0xfdc7303",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0x97f540a3577c0f645c5dada5da86f38350e8f847e71f21124f917835003e2607",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0xa",
- "parentHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0xbb62872e4023fa8a8b17b9cc37031f4817d9595779748d01cba408b495707a91",
- "timestamp": "0x64",
- "totalDifficulty": "0x1",
- "transactionsRoot": "0xb0893d21a4a44dc26a962a6e91abae66df87fb61ac9c60e936aee89c76331445"
- }`,
+ file: "tag-latest",
},
// 1. genesis header
{
blockNumber: rpc.BlockNumber(0),
reqHeader: true,
- want: `{
- "baseFeePerGas": "0x3b9aca00",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x0",
- "hash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x0",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2",
- "timestamp": "0x0",
- "totalDifficulty": "0x1",
- "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
- }`,
+ file: "number-0",
},
// 2. #1 header
{
blockNumber: rpc.BlockNumber(1),
reqHeader: true,
- want: `{
- "baseFeePerGas": "0x342770c0",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0x0da274b315de8e4d5bf8717218ec43540464ef36378cb896469bb731e1d3f3cb",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x1",
- "parentHash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0x92c5c55a698963f5b06e3aee415630f5c48b0760e537af94917ce9c4f42a2e22",
- "timestamp": "0xa",
- "totalDifficulty": "0x1",
- "transactionsRoot": "0xca0ebcce920d2cdfbf9e1dbe90ed3441a1a576f344bd80e60508da814916f4e7"
- }`,
+ file: "number-1",
},
// 3. latest-1 header
{
blockNumber: rpc.BlockNumber(9),
reqHeader: true,
- want: `{
- "baseFeePerGas": "0x121a9cca",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x9",
- "parentHash": "0x5abd19c39d9f1c6e52998e135ea14e1fbc5db3fa2a108f4538e238ca5c2e68d7",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0xbd4aa2c2873df709151075250a8c01c9a14d2b0e2f715dbdd16e0ef8030c2cf0",
- "timestamp": "0x5a",
- "totalDifficulty": "0x1",
- "transactionsRoot": "0x0767ed8359337dc6a8fdc77fe52db611bed1be87aac73c4556b1bf1dd3d190a5"
- }`,
+ file: "number-latest-1",
},
// 4. latest+1 header
{
blockNumber: rpc.BlockNumber(11),
reqHeader: true,
- want: "null",
+ file: "number-latest+1",
},
// 5. pending header
{
blockNumber: rpc.PendingBlockNumber,
reqHeader: true,
- want: `{
- "difficulty": "0x0",
- "extraData": "0x",
- "gasLimit": "0x0",
- "gasUsed": "0x0",
- "hash": null,
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": null,
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": null,
- "number": "0xb",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "timestamp": "0x2a",
- "totalDifficulty": null,
- "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37",
- "withdrawalsRoot": "0x73d756269cdfc22e7e17a3548e36f42f750ca06d7e3cd98d1b6d0eb5add9dc84"
- }`,
+ file: "tag-pending",
},
// 6. latest block
{
blockNumber: rpc.LatestBlockNumber,
- want: `{
- "baseFeePerGas": "0xfdc7303",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0x97f540a3577c0f645c5dada5da86f38350e8f847e71f21124f917835003e2607",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0xa",
- "parentHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x26a",
- "stateRoot": "0xbb62872e4023fa8a8b17b9cc37031f4817d9595779748d01cba408b495707a91",
- "timestamp": "0x64",
- "totalDifficulty": "0x1",
- "transactions": [
- "0x3ee4094ca1e0b07a66dd616a057e081e53144ca7e9685a126fd4dda9ca042644"
- ],
- "transactionsRoot": "0xb0893d21a4a44dc26a962a6e91abae66df87fb61ac9c60e936aee89c76331445",
- "uncles": []
- }`,
+ file: "tag-latest",
},
// 7. genesis block
{
blockNumber: rpc.BlockNumber(0),
- want: `{
- "baseFeePerGas": "0x3b9aca00",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x0",
- "hash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x0",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x200",
- "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2",
- "timestamp": "0x0",
- "totalDifficulty": "0x1",
- "transactions": [],
- "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "uncles": []
- }`,
+ file: "number-0",
},
// 8. #1 block
{
blockNumber: rpc.BlockNumber(1),
- want: `{
- "baseFeePerGas": "0x342770c0",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0x0da274b315de8e4d5bf8717218ec43540464ef36378cb896469bb731e1d3f3cb",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x1",
- "parentHash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x26a",
- "stateRoot": "0x92c5c55a698963f5b06e3aee415630f5c48b0760e537af94917ce9c4f42a2e22",
- "timestamp": "0xa",
- "totalDifficulty": "0x1",
- "transactions": [
- "0x644a31c354391520d00e95b9affbbb010fc79ac268144ab8e28207f4cf51097e"
- ],
- "transactionsRoot": "0xca0ebcce920d2cdfbf9e1dbe90ed3441a1a576f344bd80e60508da814916f4e7",
- "uncles": []
- }`,
+ file: "number-1",
},
// 9. latest-1 block
{
blockNumber: rpc.BlockNumber(9),
fullTx: true,
- want: `{
- "baseFeePerGas": "0x121a9cca",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x9",
- "parentHash": "0x5abd19c39d9f1c6e52998e135ea14e1fbc5db3fa2a108f4538e238ca5c2e68d7",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x26a",
- "stateRoot": "0xbd4aa2c2873df709151075250a8c01c9a14d2b0e2f715dbdd16e0ef8030c2cf0",
- "timestamp": "0x5a",
- "totalDifficulty": "0x1",
- "transactions": [
- {
- "blockHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "blockNumber": "0x9",
- "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
- "gas": "0x5208",
- "gasPrice": "0x121a9cca",
- "hash": "0xecd155a61a5734b3efab75924e3ae34026c7c4133d8c2a46122bd03d7d199725",
- "input": "0x",
- "nonce": "0x8",
- "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
- "transactionIndex": "0x0",
- "value": "0x3e8",
- "type": "0x0",
- "v": "0x1b",
- "r": "0xc6028b8e983d62fa8542f8a7633fb23cc941be2c897134352d95a7d9b19feafd",
- "s": "0xeb6adcaaae3bed489c6cce4435f9db05d23a52820c78bd350e31eec65ed809d"
- }
- ],
- "transactionsRoot": "0x0767ed8359337dc6a8fdc77fe52db611bed1be87aac73c4556b1bf1dd3d190a5",
- "uncles": []
- }`,
+ file: "number-latest-1",
},
// 10. latest+1 block
{
blockNumber: rpc.BlockNumber(11),
fullTx: true,
- want: "null",
+ file: "number-latest+1",
},
// 11. pending block
{
blockNumber: rpc.PendingBlockNumber,
- want: `{
- "difficulty": "0x0",
- "extraData": "0x",
- "gasLimit": "0x0",
- "gasUsed": "0x0",
- "hash": null,
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": null,
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": null,
- "number": "0xb",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x256",
- "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "timestamp": "0x2a",
- "totalDifficulty": null,
- "transactions": [
- "0x4afee081df5dff7a025964032871f7d4ba4d21baf5f6376a2f4a9f79fc506298"
- ],
- "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37",
- "withdrawals": [
- {
- "index": "0x0",
- "validatorIndex": "0x1",
- "address": "0x1234000000000000000000000000000000000000",
- "amount": "0xa"
- }
- ],
- "withdrawalsRoot": "0x73d756269cdfc22e7e17a3548e36f42f750ca06d7e3cd98d1b6d0eb5add9dc84",
- "uncles": []
- }`,
+ file: "tag-pending",
},
// 12. pending block + fullTx
{
blockNumber: rpc.PendingBlockNumber,
fullTx: true,
- want: `{
- "difficulty": "0x0",
- "extraData": "0x",
- "gasLimit": "0x0",
- "gasUsed": "0x0",
- "hash": null,
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": null,
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": null,
- "number": "0xb",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x256",
- "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "timestamp": "0x2a",
- "totalDifficulty": null,
- "transactions": [
- {
- "blockHash": "0x6cebd9f966ea686f44b981685e3f0eacea28591a7a86d7fbbe521a86e9f81165",
- "blockNumber": "0xb",
- "from": "0x0000000000000000000000000000000000000000",
- "gas": "0x457",
- "gasPrice": "0x2b67",
- "hash": "0x4afee081df5dff7a025964032871f7d4ba4d21baf5f6376a2f4a9f79fc506298",
- "input": "0x111111",
- "nonce": "0xb",
- "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
- "transactionIndex": "0x0",
- "value": "0x6f",
- "type": "0x0",
- "chainId": "0x7fffffffffffffee",
- "v": "0x0",
- "r": "0x0",
- "s": "0x0"
- }
- ],
- "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37",
- "uncles": [],
- "withdrawals": [
- {
- "index": "0x0",
- "validatorIndex": "0x1",
- "address": "0x1234000000000000000000000000000000000000",
- "amount": "0xa"
- }
- ],
- "withdrawalsRoot": "0x73d756269cdfc22e7e17a3548e36f42f750ca06d7e3cd98d1b6d0eb5add9dc84"
- }`,
+ file: "tag-pending-fullTx",
},
// 13. latest header by hash
{
blockHash: &blockHashes[len(blockHashes)-1],
reqHeader: true,
- want: `{
- "baseFeePerGas": "0xfdc7303",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0x97f540a3577c0f645c5dada5da86f38350e8f847e71f21124f917835003e2607",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0xa",
- "parentHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0xbb62872e4023fa8a8b17b9cc37031f4817d9595779748d01cba408b495707a91",
- "timestamp": "0x64",
- "totalDifficulty": "0x1",
- "transactionsRoot": "0xb0893d21a4a44dc26a962a6e91abae66df87fb61ac9c60e936aee89c76331445"
- }`,
+ file: "hash-latest",
},
// 14. genesis header by hash
{
blockHash: &blockHashes[0],
reqHeader: true,
- want: `{
- "baseFeePerGas": "0x3b9aca00",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x0",
- "hash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x0",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2",
- "timestamp": "0x0",
- "totalDifficulty": "0x1",
- "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
- }`,
+ file: "hash-0",
},
// 15. #1 header
{
blockHash: &blockHashes[1],
reqHeader: true,
- want: `{
- "baseFeePerGas": "0x342770c0",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0x0da274b315de8e4d5bf8717218ec43540464ef36378cb896469bb731e1d3f3cb",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x1",
- "parentHash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0x92c5c55a698963f5b06e3aee415630f5c48b0760e537af94917ce9c4f42a2e22",
- "timestamp": "0xa",
- "totalDifficulty": "0x1",
- "transactionsRoot": "0xca0ebcce920d2cdfbf9e1dbe90ed3441a1a576f344bd80e60508da814916f4e7"
- }`,
+ file: "hash-1",
},
// 16. latest-1 header
{
blockHash: &blockHashes[len(blockHashes)-2],
reqHeader: true,
- want: `{
- "baseFeePerGas": "0x121a9cca",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x9",
- "parentHash": "0x5abd19c39d9f1c6e52998e135ea14e1fbc5db3fa2a108f4538e238ca5c2e68d7",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0xbd4aa2c2873df709151075250a8c01c9a14d2b0e2f715dbdd16e0ef8030c2cf0",
- "timestamp": "0x5a",
- "totalDifficulty": "0x1",
- "transactionsRoot": "0x0767ed8359337dc6a8fdc77fe52db611bed1be87aac73c4556b1bf1dd3d190a5"
- }`,
+ file: "hash-latest-1",
},
// 17. empty hash
{
blockHash: &common.Hash{},
reqHeader: true,
- want: "null",
+ file: "hash-empty",
},
// 18. pending hash
{
blockHash: &pendingHash,
reqHeader: true,
- want: `null`,
+ file: `hash-pending`,
},
// 19. latest block
{
blockHash: &blockHashes[len(blockHashes)-1],
- want: `{
- "baseFeePerGas": "0xfdc7303",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0x97f540a3577c0f645c5dada5da86f38350e8f847e71f21124f917835003e2607",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0xa",
- "parentHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x26a",
- "stateRoot": "0xbb62872e4023fa8a8b17b9cc37031f4817d9595779748d01cba408b495707a91",
- "timestamp": "0x64",
- "totalDifficulty": "0x1",
- "transactions": [
- "0x3ee4094ca1e0b07a66dd616a057e081e53144ca7e9685a126fd4dda9ca042644"
- ],
- "transactionsRoot": "0xb0893d21a4a44dc26a962a6e91abae66df87fb61ac9c60e936aee89c76331445",
- "uncles": []
- }`,
+ file: "hash-latest",
},
// 20. genesis block
{
blockHash: &blockHashes[0],
- want: `{
- "baseFeePerGas": "0x3b9aca00",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x0",
- "hash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x0",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x200",
- "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2",
- "timestamp": "0x0",
- "totalDifficulty": "0x1",
- "transactions": [],
- "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
- "uncles": []
- }`,
+ file: "hash-genesis",
},
// 21. #1 block
{
blockHash: &blockHashes[1],
- want: `{
- "baseFeePerGas": "0x342770c0",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0x0da274b315de8e4d5bf8717218ec43540464ef36378cb896469bb731e1d3f3cb",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x1",
- "parentHash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x26a",
- "stateRoot": "0x92c5c55a698963f5b06e3aee415630f5c48b0760e537af94917ce9c4f42a2e22",
- "timestamp": "0xa",
- "totalDifficulty": "0x1",
- "transactions": [
- "0x644a31c354391520d00e95b9affbbb010fc79ac268144ab8e28207f4cf51097e"
- ],
- "transactionsRoot": "0xca0ebcce920d2cdfbf9e1dbe90ed3441a1a576f344bd80e60508da814916f4e7",
- "uncles": []
- }`,
+ file: "hash-1",
},
// 22. latest-1 block
{
blockHash: &blockHashes[len(blockHashes)-2],
fullTx: true,
- want: `{
- "baseFeePerGas": "0x121a9cca",
- "difficulty": "0x20000",
- "extraData": "0x",
- "gasLimit": "0x47e7c4",
- "gasUsed": "0x5208",
- "hash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "miner": "0x0000000000000000000000000000000000000000",
- "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "nonce": "0x0000000000000000",
- "number": "0x9",
- "parentHash": "0x5abd19c39d9f1c6e52998e135ea14e1fbc5db3fa2a108f4538e238ca5c2e68d7",
- "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
- "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x26a",
- "stateRoot": "0xbd4aa2c2873df709151075250a8c01c9a14d2b0e2f715dbdd16e0ef8030c2cf0",
- "timestamp": "0x5a",
- "totalDifficulty": "0x1",
- "transactions": [
- {
- "blockHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e",
- "blockNumber": "0x9",
- "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
- "gas": "0x5208",
- "gasPrice": "0x121a9cca",
- "hash": "0xecd155a61a5734b3efab75924e3ae34026c7c4133d8c2a46122bd03d7d199725",
- "input": "0x",
- "nonce": "0x8",
- "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
- "transactionIndex": "0x0",
- "value": "0x3e8",
- "type": "0x0",
- "v": "0x1b",
- "r": "0xc6028b8e983d62fa8542f8a7633fb23cc941be2c897134352d95a7d9b19feafd",
- "s": "0xeb6adcaaae3bed489c6cce4435f9db05d23a52820c78bd350e31eec65ed809d"
- }
- ],
- "transactionsRoot": "0x0767ed8359337dc6a8fdc77fe52db611bed1be87aac73c4556b1bf1dd3d190a5",
- "uncles": []
- }`,
+ file: "hash-latest-1-fullTx",
},
// 23. empty hash + body
{
blockHash: &common.Hash{},
fullTx: true,
- want: "null",
+ file: "hash-empty-fullTx",
},
// 24. pending block
{
blockHash: &pendingHash,
- want: `null`,
+ file: `hash-pending`,
},
// 25. pending block + fullTx
{
blockHash: &pendingHash,
fullTx: true,
- want: `null`,
+ file: "hash-pending-fullTx",
},
}
@@ -1728,18 +1320,23 @@ for i, tt := range testSuite {
var (
result map[string]interface{}
err error
+ rpc string
)
if tt.blockHash != nil {
if tt.reqHeader {
result = api.GetHeaderByHash(context.Background(), *tt.blockHash)
+ rpc = "eth_getHeaderByHash"
} else {
result, err = api.GetBlockByHash(context.Background(), *tt.blockHash, tt.fullTx)
+ rpc = "eth_getBlockByHash"
}
} else {
if tt.reqHeader {
result, err = api.GetHeaderByNumber(context.Background(), tt.blockNumber)
+ rpc = "eth_getHeaderByNumber"
} else {
result, err = api.GetBlockByNumber(context.Background(), tt.blockNumber, tt.fullTx)
+ rpc = "eth_getBlockByNumber"
}
}
if tt.expectErr != nil {
@@ -1756,20 +1353,15 @@ if err != nil {
t.Errorf("test %d: want no error, have %v", i, err)
continue
}
- data, err := json.Marshal(result)
- if err != nil {
- t.Errorf("test %d: json marshal error", i)
- continue
- }
- want, have := tt.want, string(data)
- require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have)
+
+ testRPCResponseWithFile(t, i, result, rpc, tt.file)
}
}
-func TestRPCGetTransactionReceipt(t *testing.T) {
- t.Parallel()
-
- // Initialize test accounts
+func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Hash) {
+ config := *params.TestChainConfig
+ config.ShanghaiTime = new(uint64)
+ config.CancunTime = new(uint64)
var (
acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
@@ -1777,7 +1369,9 @@ acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey)
contract = common.HexToAddress("0000000000000000000000000000000000031ec7")
genesis = &core.Genesis{
- Config: params.TestChainConfig,
+ Config: &config,
+ ExcessBlobGas: new(uint64),
+ BlobGasUsed: new(uint64),
Alloc: core.GenesisAlloc{
acc1Addr: {Balance: big.NewInt(params.Ether)},
acc2Addr: {Balance: big.NewInt(params.Ether)},
@@ -1794,11 +1388,14 @@ // }
contract: {Balance: big.NewInt(params.Ether), Code: common.FromHex("0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063a9059cbb14610030575b600080fd5b61004a6004803603810190610045919061016a565b610060565b60405161005791906101c5565b60405180910390f35b60008273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516100bf91906101ef565b60405180910390a36001905092915050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610101826100d6565b9050919050565b610111816100f6565b811461011c57600080fd5b50565b60008135905061012e81610108565b92915050565b6000819050919050565b61014781610134565b811461015257600080fd5b50565b6000813590506101648161013e565b92915050565b60008060408385031215610181576101806100d1565b5b600061018f8582860161011f565b92505060206101a085828601610155565b9150509250929050565b60008115159050919050565b6101bf816101aa565b82525050565b60006020820190506101da60008301846101b6565b92915050565b6101e981610134565b82525050565b600060208201905061020460008301846101e0565b9291505056fea2646970667358221220b469033f4b77b9565ee84e0a2f04d496b18160d26034d54f9487e57788fd36d564736f6c63430008120033")},
},
}
- genBlocks = 5
- signer = types.LatestSignerForChainID(params.TestChainConfig.ChainID)
- txHashes = make([]common.Hash, genBlocks)
+ signer = types.LatestSignerForChainID(params.TestChainConfig.ChainID)
+ txHashes = make([]common.Hash, genBlocks)
)
- backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+
+ // Set the terminal total difficulty in the config
+ genesis.Config.TerminalTotalDifficulty = big.NewInt(0)
+ genesis.Config.TerminalTotalDifficultyPassed = true
+ backend := newTestBackend(t, genBlocks, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
var (
tx *types.Transaction
err error
@@ -1829,6 +1426,20 @@ Address: contract,
StorageKeys: []common.Hash{{0}},
}}
tx, err = types.SignTx(types.NewTx(&types.AccessListTx{Nonce: uint64(i), To: nil, Gas: 58100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040"), AccessList: accessList}), signer, acc1Key)
+ case 5:
+ // blob tx
+ fee := big.NewInt(500)
+ fee.Add(fee, b.BaseFee())
+ tx, err = types.SignTx(types.NewTx(&types.BlobTx{
+ Nonce: uint64(i),
+ GasTipCap: uint256.NewInt(1),
+ GasFeeCap: uint256.MustFromBig(fee),
+ Gas: params.TxGas,
+ To: acc2Addr,
+ BlobFeeCap: uint256.NewInt(1),
+ BlobHashes: []common.Hash{{1}},
+ Value: new(uint256.Int),
+ }), signer, acc1Key)
}
if err != nil {
t.Errorf("failed to sign tx: %v", err)
@@ -1837,147 +1448,62 @@ if tx != nil {
b.AddTx(tx)
txHashes[i] = tx.Hash()
}
+ b.SetPoS()
})
- api := NewTransactionAPI(backend, new(AddrLocker))
- blockHashes := make([]common.Hash, genBlocks+1)
- ctx := context.Background()
- for i := 0; i <= genBlocks; i++ {
- header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
- if err != nil {
- t.Errorf("failed to get block: %d err: %v", i, err)
- }
- blockHashes[i] = header.Hash()
- }
+ return backend, txHashes
+}
+
+func TestRPCGetTransactionReceipt(t *testing.T) {
+ t.Parallel()
+
+ var (
+ backend, txHashes = setupReceiptBackend(t, 6)
+ api = NewTransactionAPI(backend, new(AddrLocker))
+ )
var testSuite = []struct {
txHash common.Hash
- want string
+ file string
}{
// 0. normal success
{
txHash: txHashes[0],
- want: `{
- "blockHash": "0x1356e49a24d4504e450b303aa770f4ae13c29b9ffacaea1d7dd4043396229dd9",
- "blockNumber": "0x1",
- "contractAddress": null,
- "cumulativeGasUsed": "0x5208",
- "effectiveGasPrice": "0x342770c0",
- "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
- "gasUsed": "0x5208",
- "logs": [],
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "status": "0x1",
- "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
- "transactionHash": "0x644a31c354391520d00e95b9affbbb010fc79ac268144ab8e28207f4cf51097e",
- "transactionIndex": "0x0",
- "type": "0x0"
- }`,
+ file: "normal-transfer-tx",
},
// 1. create contract
{
txHash: txHashes[1],
- want: `{
- "blockHash": "0x4fc27a4efa7fb8faa04b12b53ec8c8424ab4c21aab1323846365f000e8b4a594",
- "blockNumber": "0x2",
- "contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
- "cumulativeGasUsed": "0xcf4e",
- "effectiveGasPrice": "0x2db16291",
- "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
- "gasUsed": "0xcf4e",
- "logs": [],
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "status": "0x1",
- "to": null,
- "transactionHash": "0x340e58cda5086495010b571fe25067fecc9954dc4ee3cedece00691fa3f5904a",
- "transactionIndex": "0x0",
- "type": "0x0"
- }`,
+ file: "create-contract-tx",
},
// 2. with logs success
{
txHash: txHashes[2],
- want: `{
- "blockHash": "0x73385c190219326907524b0020ef453ebc450eaa971ebce16f79e2d23e7e8d4d",
- "blockNumber": "0x3",
- "contractAddress": null,
- "cumulativeGasUsed": "0x5e28",
- "effectiveGasPrice": "0x281c2534",
- "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
- "gasUsed": "0x5e28",
- "logs": [
- {
- "address": "0x0000000000000000000000000000000000031ec7",
- "topics": [
- "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
- "0x000000000000000000000000703c4b2bd70c169f5717101caee543299fc946c7",
- "0x0000000000000000000000000000000000000000000000000000000000000003"
- ],
- "data": "0x000000000000000000000000000000000000000000000000000000000000000d",
- "blockNumber": "0x3",
- "transactionHash": "0x9dbf43ec9afc8d711932618616471088f66ba4f25fd5c672d97473d02dae967f",
- "transactionIndex": "0x0",
- "blockHash": "0x73385c190219326907524b0020ef453ebc450eaa971ebce16f79e2d23e7e8d4d",
- "logIndex": "0x0",
- "removed": false
- }
- ],
- "logsBloom": "0x00000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000800000000000000008000000000000000000000000000000000020000000080000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000400000000002000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000",
- "status": "0x1",
- "to": "0x0000000000000000000000000000000000031ec7",
- "transactionHash": "0x9dbf43ec9afc8d711932618616471088f66ba4f25fd5c672d97473d02dae967f",
- "transactionIndex": "0x0",
- "type": "0x0"
- }`,
+ file: "with-logs",
},
// 3. dynamic tx with logs success
{
txHash: txHashes[3],
- want: `{
- "blockHash": "0x77c3f8919590e0e68db4ce74a3da3140ac3e96dd3d078a48db1da4c08b07503d",
- "blockNumber": "0x4",
- "contractAddress": null,
- "cumulativeGasUsed": "0x538d",
- "effectiveGasPrice": "0x2325c3e8",
- "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
- "gasUsed": "0x538d",
- "logs": [],
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "status": "0x0",
- "to": "0x0000000000000000000000000000000000031ec7",
- "transactionHash": "0x672e3e39adf23b5656989b7a36e54d54004b1866f53871113bc52e137edb9faf",
- "transactionIndex": "0x0",
- "type": "0x2"
- }`,
+ file: `dynamic-tx-with-logs`,
},
// 4. access list tx with create contract
{
txHash: txHashes[4],
- want: `{
- "blockHash": "0x08e23d8e3711a21fbb8becd7de22fda8fb0a49fba14e1be763d00f99063627e1",
- "blockNumber": "0x5",
- "contractAddress": "0xfdaa97661a584d977b4d3abb5370766ff5b86a18",
- "cumulativeGasUsed": "0xe01a",
- "effectiveGasPrice": "0x1ecb3f75",
- "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
- "gasUsed": "0xe01a",
- "logs": [],
- "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- "status": "0x1",
- "to": null,
- "transactionHash": "0x8f3c4e2663af0312d508ebd8587f0c88dccbbc8a9bcc322421ff4bc28c456a92",
- "transactionIndex": "0x0",
- "type": "0x1"
- }`,
+ file: "create-contract-with-access-list",
},
// 5. txhash empty
{
txHash: common.Hash{},
- want: `null`,
+ file: "txhash-empty",
},
// 6. txhash not found
{
txHash: common.HexToHash("deadbeef"),
- want: `null`,
+ file: "txhash-notfound",
+ },
+ // 7. blob tx
+ {
+ txHash: txHashes[5],
+ file: "blob-tx",
},
}
@@ -1991,12 +1517,121 @@ if err != nil {
t.Errorf("test %d: want no error, have %v", i, err)
continue
}
- data, err := json.Marshal(result)
+ testRPCResponseWithFile(t, i, result, "eth_getTransactionReceipt", tt.file)
+ }
+}
+
+func TestRPCGetBlockReceipts(t *testing.T) {
+ t.Parallel()
+
+ var (
+ genBlocks = 6
+ backend, _ = setupReceiptBackend(t, genBlocks)
+ api = NewBlockChainAPI(backend)
+ )
+ blockHashes := make([]common.Hash, genBlocks+1)
+ ctx := context.Background()
+ for i := 0; i <= genBlocks; i++ {
+ header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i))
if err != nil {
- t.Errorf("test %d: json marshal error", i)
+ t.Errorf("failed to get block: %d err: %v", i, err)
+ }
+ blockHashes[i] = header.Hash()
+ }
+
+ var testSuite = []struct {
+ test rpc.BlockNumberOrHash
+ file string
+ }{
+ // 0. block without any txs(hash)
+ {
+ test: rpc.BlockNumberOrHashWithHash(blockHashes[0], false),
+ file: "number-0",
+ },
+ // 1. block without any txs(number)
+ {
+ test: rpc.BlockNumberOrHashWithNumber(0),
+ file: "number-1",
+ },
+ // 2. earliest tag
+ {
+ test: rpc.BlockNumberOrHashWithNumber(rpc.EarliestBlockNumber),
+ file: "tag-earliest",
+ },
+ // 3. latest tag
+ {
+ test: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber),
+ file: "tag-latest",
+ },
+ // 4. block with legacy transfer tx(hash)
+ {
+ test: rpc.BlockNumberOrHashWithHash(blockHashes[1], false),
+ file: "block-with-legacy-transfer-tx",
+ },
+ // 5. block with contract create tx(number)
+ {
+ test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(2)),
+ file: "block-with-contract-create-tx",
+ },
+ // 6. block with legacy contract call tx(hash)
+ {
+ test: rpc.BlockNumberOrHashWithHash(blockHashes[3], false),
+ file: "block-with-legacy-contract-call-tx",
+ },
+ // 7. block with dynamic fee tx(number)
+ {
+ test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(4)),
+ file: "block-with-dynamic-fee-tx",
+ },
+ // 8. block is empty
+ {
+ test: rpc.BlockNumberOrHashWithHash(common.Hash{}, false),
+ file: "hash-empty",
+ },
+ // 9. block is not found
+ {
+ test: rpc.BlockNumberOrHashWithHash(common.HexToHash("deadbeef"), false),
+ file: "hash-notfound",
+ },
+ // 10. block is not found (number beyond chain head)
+ {
+ test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(genBlocks + 1)),
+ file: "block-notfound",
+ },
+ // 11. block with blob tx
+ {
+ test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(6)),
+ file: "block-with-blob-tx",
+ },
+ }
+
+ for i, tt := range testSuite {
+ var (
+ result interface{}
+ err error
+ )
+ result, err = api.GetBlockReceipts(context.Background(), tt.test)
+ if err != nil {
+ t.Errorf("test %d: want no error, have %v", i, err)
continue
}
- want, have := tt.want, string(data)
- require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have)
+ testRPCResponseWithFile(t, i, result, "eth_getBlockReceipts", tt.file)
}
}
+
+func testRPCResponseWithFile(t *testing.T, testid int, result interface{}, rpc string, file string) {
+ data, err := json.MarshalIndent(result, "", " ")
+ if err != nil {
+ t.Errorf("test %d: json marshal error", testid)
+ return
+ }
+ outputFile := filepath.Join("testdata", fmt.Sprintf("%s-%s.json", rpc, file))
+ if os.Getenv("WRITE_TEST_FILES") != "" {
+ os.WriteFile(outputFile, data, 0644)
+ }
+ want, err := os.ReadFile(outputFile)
+ if err != nil {
+ t.Fatalf("error reading expected test file: %s output: %v", outputFile, err)
+ }
+ require.JSONEqf(t, string(want), string(data), "test %d: json not match, want: %s, have: %s", testid, string(want), string(data))
+}
diff --git ethereum/go-ethereum/internal/flags/categories.go taikoxyz/taiko-geth/internal/flags/categories.go
index ac71931879b4e66f046a90b9ff55098360a62111..487684d98b3e4388fae941d96e9e7ffce6c1b214 100644
--- ethereum/go-ethereum/internal/flags/categories.go
+++ taikoxyz/taiko-geth/internal/flags/categories.go
@@ -22,7 +22,7 @@ const (
EthCategory = "ETHEREUM"
LightCategory = "LIGHT CLIENT"
DevCategory = "DEVELOPER CHAIN"
- EthashCategory = "ETHASH"
+ StateCategory = "STATE HISTORY MANAGEMENT"
TxPoolCategory = "TRANSACTION POOL (EVM)"
BlobPoolCategory = "TRANSACTION POOL (BLOB)"
PerfCategory = "PERFORMANCE TUNING"
diff --git ethereum/go-ethereum/internal/flags/flags.go taikoxyz/taiko-geth/internal/flags/flags.go
index b0756b4e0a1f9490ed865f488b6a7e4a798646d5..69e9743556b4164d61390055492b6295d51b599d 100644
--- ethereum/go-ethereum/internal/flags/flags.go
+++ taikoxyz/taiko-geth/internal/flags/flags.go
@@ -20,11 +20,13 @@ import (
"encoding"
"errors"
"flag"
+ "fmt"
"math/big"
"os"
"os/user"
"path/filepath"
"strings"
+ "syscall"
"github.com/ethereum/go-ethereum/common/math"
"github.com/urfave/cli/v2"
@@ -68,6 +70,7 @@
Value DirectoryString
Aliases []string
+ EnvVars []string
}
// For cli.Flag:
@@ -79,6 +82,14 @@
// Apply called by cli library, grabs variable from environment (if in env)
// and adds variable to flag set for parsing.
func (f *DirectoryFlag) Apply(set *flag.FlagSet) error {
+ for _, envVar := range f.EnvVars {
+ envVar = strings.TrimSpace(envVar)
+ if value, found := syscall.Getenv(envVar); found {
+ f.Value.Set(value)
+ f.HasBeenSet = true
+ break
+ }
+ }
eachName(f, func(name string) {
set.Var(&f.Value, f.Name, f.Usage)
})
@@ -102,7 +113,7 @@
func (f *DirectoryFlag) TakesValue() bool { return true }
func (f *DirectoryFlag) GetUsage() string { return f.Usage }
func (f *DirectoryFlag) GetValue() string { return f.Value.String() }
-func (f *DirectoryFlag) GetEnvVars() []string { return nil } // env not supported
+func (f *DirectoryFlag) GetEnvVars() []string { return f.EnvVars }
func (f *DirectoryFlag) GetDefaultText() string {
if f.DefaultText != "" {
@@ -156,6 +167,7 @@
Value TextMarshaler
Aliases []string
+ EnvVars []string
}
// For cli.Flag:
@@ -165,6 +177,16 @@ func (f *TextMarshalerFlag) IsSet() bool { return f.HasBeenSet }
func (f *TextMarshalerFlag) String() string { return cli.FlagStringer(f) }
func (f *TextMarshalerFlag) Apply(set *flag.FlagSet) error {
+ for _, envVar := range f.EnvVars {
+ envVar = strings.TrimSpace(envVar)
+ if value, found := syscall.Getenv(envVar); found {
+ if err := f.Value.UnmarshalText([]byte(value)); err != nil {
+ return fmt.Errorf("could not parse %q from environment variable %q for flag %s: %s", value, envVar, f.Name, err)
+ }
+ f.HasBeenSet = true
+ break
+ }
+ }
eachName(f, func(name string) {
set.Var(textMarshalerVal{f.Value}, f.Name, f.Usage)
})
@@ -187,7 +209,7 @@ // For cli.DocGenerationFlag:
func (f *TextMarshalerFlag) TakesValue() bool { return true }
func (f *TextMarshalerFlag) GetUsage() string { return f.Usage }
-func (f *TextMarshalerFlag) GetEnvVars() []string { return nil } // env not supported
+func (f *TextMarshalerFlag) GetEnvVars() []string { return f.EnvVars }
func (f *TextMarshalerFlag) GetValue() string {
t, err := f.Value.MarshalText()
@@ -237,6 +259,7 @@
Value *big.Int
Aliases []string
+ EnvVars []string
}
// For cli.Flag:
@@ -246,6 +269,16 @@ func (f *BigFlag) IsSet() bool { return f.HasBeenSet }
func (f *BigFlag) String() string { return cli.FlagStringer(f) }
func (f *BigFlag) Apply(set *flag.FlagSet) error {
+ for _, envVar := range f.EnvVars {
+ envVar = strings.TrimSpace(envVar)
+ if value, found := syscall.Getenv(envVar); found {
+ if _, ok := f.Value.SetString(value, 10); !ok {
+ return fmt.Errorf("could not parse %q from environment variable %q for flag %s", value, envVar, f.Name)
+ }
+ f.HasBeenSet = true
+ break
+ }
+ }
eachName(f, func(name string) {
f.Value = new(big.Int)
set.Var((*bigValue)(f.Value), f.Name, f.Usage)
@@ -271,7 +304,7 @@
func (f *BigFlag) TakesValue() bool { return true }
func (f *BigFlag) GetUsage() string { return f.Usage }
func (f *BigFlag) GetValue() string { return f.Value.String() }
-func (f *BigFlag) GetEnvVars() []string { return nil } // env not supported
+func (f *BigFlag) GetEnvVars() []string { return f.EnvVars }
func (f *BigFlag) GetDefaultText() string {
if f.DefaultText != "" {
diff --git ethereum/go-ethereum/internal/flags/helpers.go taikoxyz/taiko-geth/internal/flags/helpers.go
index f210e729dd27b512332e441192f37e6fcfd56f78..d4b8e373cc45dbb1f73dea0124cf1fc851475902 100644
--- ethereum/go-ethereum/internal/flags/helpers.go
+++ taikoxyz/taiko-geth/internal/flags/helpers.go
@@ -18,12 +18,21 @@ package flags
import (
"fmt"
+ "os"
+ "regexp"
+ "sort"
"strings"
"github.com/ethereum/go-ethereum/internal/version"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
+ "github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
)
+
+// usecolor defines whether the CLI help should use colored output or normal dumb
+// colorless terminal formatting.
+var usecolor = (isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())) && os.Getenv("TERM") != "dumb"
// NewApp creates an app with sane defaults.
func NewApp(usage string) *cli.App {
@@ -129,6 +138,14 @@ }
}
func init() {
+ if usecolor {
+ // Annotate all help categories with colors
+ cli.AppHelpTemplate = regexp.MustCompile("[A-Z ]+:").ReplaceAllString(cli.AppHelpTemplate, "\u001B[33m$0\u001B[0m")
+
+ // Annotate flag categories with colors (private template, so need to
+ // copy-paste the entire thing here...)
+ cli.AppHelpTemplate = strings.ReplaceAll(cli.AppHelpTemplate, "{{template \"visibleFlagCategoryTemplate\" .}}", "{{range .VisibleFlagCategories}}\n {{if .Name}}\u001B[33m{{.Name}}\u001B[0m\n\n {{end}}{{$flglen := len .Flags}}{{range $i, $e := .Flags}}{{if eq (subtract $flglen $i) 1}}{{$e}}\n{{else}}{{$e}}\n {{end}}{{end}}{{end}}")
+ }
cli.FlagStringer = FlagString
}
@@ -138,37 +155,31 @@ df, ok := f.(cli.DocGenerationFlag)
if !ok {
return ""
}
-
needsPlaceholder := df.TakesValue()
placeholder := ""
if needsPlaceholder {
placeholder = "value"
}
- namesText := pad(cli.FlagNamePrefixer(df.Names(), placeholder), 30)
+ namesText := cli.FlagNamePrefixer(df.Names(), placeholder)
defaultValueString := ""
if s := df.GetDefaultText(); s != "" {
defaultValueString = " (default: " + s + ")"
}
-
- usage := strings.TrimSpace(df.GetUsage())
envHint := strings.TrimSpace(cli.FlagEnvHinter(df.GetEnvVars(), ""))
- if len(envHint) > 0 {
- usage += " " + envHint
+ if envHint != "" {
+ envHint = " (" + envHint[1:len(envHint)-1] + ")"
}
-
+ usage := strings.TrimSpace(df.GetUsage())
usage = wordWrap(usage, 80)
usage = indent(usage, 10)
- return fmt.Sprintf("\n %s%s\n%s", namesText, defaultValueString, usage)
-}
-
-func pad(s string, length int) string {
- if len(s) < length {
- s += strings.Repeat(" ", length-len(s))
+ if usecolor {
+ return fmt.Sprintf("\n \u001B[32m%-35s%-35s\u001B[0m%s\n%s", namesText, defaultValueString, envHint, usage)
+ } else {
+ return fmt.Sprintf("\n %-35s%-35s%s\n%s", namesText, defaultValueString, envHint, usage)
}
- return s
}
func indent(s string, nspace int) string {
@@ -213,3 +224,78 @@ }
return output.String()
}
+
+// AutoEnvVars extends all the specific CLI flags with automatically generated
+// env vars by capitalizing the flag, replacing . with _ and prefixing it with
+// the specified string.
+//
+// Note, the prefix should *not* contain the separator underscore, that will be
+// added automatically.
+func AutoEnvVars(flags []cli.Flag, prefix string) {
+ for _, flag := range flags {
+ envvar := strings.ToUpper(prefix + "_" + strings.ReplaceAll(strings.ReplaceAll(flag.Names()[0], ".", "_"), "-", "_"))
+
+ switch flag := flag.(type) {
+ case *cli.StringFlag:
+ flag.EnvVars = append(flag.EnvVars, envvar)
+
+ case *cli.BoolFlag:
+ flag.EnvVars = append(flag.EnvVars, envvar)
+
+ case *cli.IntFlag:
+ flag.EnvVars = append(flag.EnvVars, envvar)
+
+ case *cli.Uint64Flag:
+ flag.EnvVars = append(flag.EnvVars, envvar)
+
+ case *cli.DurationFlag:
+ flag.EnvVars = append(flag.EnvVars, envvar)
+
+ case *cli.PathFlag:
+ flag.EnvVars = append(flag.EnvVars, envvar)
+
+ case *BigFlag:
+ flag.EnvVars = append(flag.EnvVars, envvar)
+
+ case *TextMarshalerFlag:
+ flag.EnvVars = append(flag.EnvVars, envvar)
+
+ case *DirectoryFlag:
+ flag.EnvVars = append(flag.EnvVars, envvar)
+ }
+ }
+}
+
+// CheckEnvVars iterates over all the environment variables and checks whether
+// any of them looks like a CLI flag but is not consumed. This can be used to
+// detect old or mistyped names.
+func CheckEnvVars(ctx *cli.Context, flags []cli.Flag, prefix string) {
+ known := make(map[string]string)
+ for _, flag := range flags {
+ docflag, ok := flag.(cli.DocGenerationFlag)
+ if !ok {
+ continue
+ }
+ for _, envvar := range docflag.GetEnvVars() {
+ known[envvar] = flag.Names()[0]
+ }
+ }
+ keyvals := os.Environ()
+ sort.Strings(keyvals)
+
+ for _, keyval := range keyvals {
+ key := strings.Split(keyval, "=")[0]
+ if !strings.HasPrefix(key, prefix) {
+ continue
+ }
+ if flag, ok := known[key]; ok {
+ if ctx.Count(flag) > 0 {
+ log.Info("Config environment variable found", "envvar", key, "shadowedby", "--"+flag)
+ } else {
+ log.Info("Config environment variable found", "envvar", key)
+ }
+ } else {
+ log.Warn("Unknown config environment variable", "envvar", key)
+ }
+ }
+}
diff --git ethereum/go-ethereum/internal/web3ext/web3ext.go taikoxyz/taiko-geth/internal/web3ext/web3ext.go
index 55f19f3104eeb9c32b4aac8a882147ee594bbdea..b86b5909d2cb09046dc8eeacb7ae7377e5d63d0a 100644
--- ethereum/go-ethereum/internal/web3ext/web3ext.go
+++ taikoxyz/taiko-geth/internal/web3ext/web3ext.go
@@ -617,6 +617,11 @@ call: 'eth_call',
params: 4,
inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputDefaultBlockNumberFormatter, null, null],
}),
+ new web3._extend.Method({
+ name: 'getBlockReceipts',
+ call: 'eth_getBlockReceipts',
+ params: 1,
+ }),
],
properties: [
new web3._extend.Property({
diff --git ethereum/go-ethereum/light/lightchain_test.go taikoxyz/taiko-geth/light/lightchain_test.go
index e3d756f801118b7d2d4d3494aa96a926f61bbb3c..5694ca72c26693fde44814edf3cdc12364a12ba3 100644
--- ethereum/go-ethereum/light/lightchain_test.go
+++ taikoxyz/taiko-geth/light/lightchain_test.go
@@ -29,6 +29,7 @@ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
// So we can deterministically seed different blockchains
@@ -55,7 +56,7 @@ // header only chain.
func newCanonical(n int) (ethdb.Database, *LightChain, error) {
db := rawdb.NewMemoryDatabase()
gspec := core.Genesis{Config: params.TestChainConfig}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
blockchain, _ := NewLightChain(&dummyOdr{db: db, indexerConfig: TestClientIndexerConfig}, gspec.Config, ethash.NewFaker())
// Create and inject the requested chain
@@ -75,7 +76,7 @@ gspec := &core.Genesis{
Difficulty: big.NewInt(1),
Config: params.TestChainConfig,
}
- gspec.MustCommit(db)
+ gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
lc, err := NewLightChain(&dummyOdr{db: db}, gspec.Config, ethash.NewFullFaker())
if err != nil {
panic(err)
diff --git ethereum/go-ethereum/light/odr.go taikoxyz/taiko-geth/light/odr.go
index 2597027435ba2fdf7e52add58e76015bdd43f125..39f626ee2c5368bf23ee41f21fe66aea1a6616eb 100644
--- ethereum/go-ethereum/light/odr.go
+++ taikoxyz/taiko-geth/light/odr.go
@@ -27,6 +27,7 @@ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// NoOdr is the default context passed to an ODR capable function when the ODR
@@ -90,7 +91,7 @@ // TrieRequest is the ODR request type for state/storage trie entries
type TrieRequest struct {
Id *TrieID
Key []byte
- Proof *NodeSet
+ Proof *trienode.ProofSet
}
// StoreResult stores the retrieved data in local database
@@ -143,7 +144,7 @@ ChtNum, BlockNum uint64
ChtRoot common.Hash
Header *types.Header
Td *big.Int
- Proof *NodeSet
+ Proof *trienode.ProofSet
}
// StoreResult stores the retrieved data in local database
@@ -163,7 +164,7 @@ BitIdx uint
SectionIndexList []uint64
BloomTrieRoot common.Hash
BloomBits [][]byte
- Proofs *NodeSet
+ Proofs *trienode.ProofSet
}
// StoreResult stores the retrieved data in local database
diff --git ethereum/go-ethereum/light/odr_test.go taikoxyz/taiko-geth/light/odr_test.go
index 79f901bbdb680fd4f96258ec9a62c29de7456a85..c415d73e7ef2396556eeed0e4a81da016c4512c8 100644
--- ethereum/go-ethereum/light/odr_test.go
+++ taikoxyz/taiko-geth/light/odr_test.go
@@ -36,6 +36,8 @@ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -94,7 +96,7 @@ }
if err != nil {
panic(err)
}
- nodes := NewNodeSet()
+ nodes := trienode.NewProofSet()
t.Prove(req.Key, nodes)
req.Proof = nodes
case *CodeRequest:
@@ -282,7 +284,7 @@ if _, err := blockchain.InsertChain(gchain); err != nil {
t.Fatal(err)
}
- gspec.MustCommit(ldb)
+ gspec.MustCommit(ldb, trie.NewDatabase(ldb, trie.HashDefaults))
odr := &testOdr{sdb: sdb, ldb: ldb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig}
lightchain, err := NewLightChain(odr, gspec.Config, ethash.NewFullFaker())
if err != nil {
diff --git ethereum/go-ethereum/light/odr_util.go taikoxyz/taiko-geth/light/odr_util.go
index 02379ce5fff633443a9ddfd37f806af0e0033680..9cac7df4fa98323fbf6a4a2e44c43b7584a84a03 100644
--- ethereum/go-ethereum/light/odr_util.go
+++ taikoxyz/taiko-geth/light/odr_util.go
@@ -17,7 +17,6 @@
package light
import (
- "bytes"
"context"
"errors"
"math/big"
@@ -126,7 +125,7 @@ if err != nil {
return nil, err
}
body := new(types.Body)
- if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
+ if err := rlp.DecodeBytes(data, body); err != nil {
return nil, err
}
return body, nil
diff --git ethereum/go-ethereum/light/postprocess.go taikoxyz/taiko-geth/light/postprocess.go
index 567814e2bfd7366d1210c1ee3d196433fdffdc30..a317e30b90a45c1dbb15dfb9985a96048f3182aa 100644
--- ethereum/go-ethereum/light/postprocess.go
+++ taikoxyz/taiko-geth/light/postprocess.go
@@ -145,7 +145,7 @@ backend := &ChtIndexerBackend{
diskdb: db,
odr: odr,
trieTable: trieTable,
- triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down
+ triedb: trie.NewDatabase(trieTable, trie.HashDefaults),
sectionSize: size,
disablePruning: disablePruning,
}
@@ -348,7 +348,7 @@ backend := &BloomTrieIndexerBackend{
diskdb: db,
odr: odr,
trieTable: trieTable,
- triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down
+ triedb: trie.NewDatabase(trieTable, trie.HashDefaults),
parentSize: parentSize,
size: size,
disablePruning: disablePruning,
@@ -363,7 +363,7 @@ // ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
indexCh := make(chan uint, types.BloomBitLength)
type res struct {
- nodes *NodeSet
+ nodes *trienode.ProofSet
err error
}
resCh := make(chan res, types.BloomBitLength)
diff --git ethereum/go-ethereum/light/trie.go taikoxyz/taiko-geth/light/trie.go
index 4967cc74e5ba813c50436b3bd34b5cfda830c732..1847f1e71b3c6db84ff4f293d197624c33e6c31d 100644
--- ethereum/go-ethereum/light/trie.go
+++ taikoxyz/taiko-geth/light/trie.go
@@ -215,7 +215,8 @@ id = trie.StorageTrieID(t.id.StateRoot, crypto.Keccak256Hash(t.id.AccountAddress), t.id.Root)
} else {
id = trie.StateTrieID(t.id.StateRoot)
}
- t.trie, err = trie.New(id, trie.NewDatabase(t.db.backend.Database()))
+ triedb := trie.NewDatabase(t.db.backend.Database(), trie.HashDefaults)
+ t.trie, err = trie.New(id, triedb)
}
if err == nil {
err = fn()
@@ -247,7 +248,8 @@ id = trie.StorageTrieID(t.id.StateRoot, crypto.Keccak256Hash(t.id.AccountAddress), t.id.Root)
} else {
id = trie.StateTrieID(t.id.StateRoot)
}
- t, err := trie.New(id, trie.NewDatabase(t.db.backend.Database()))
+ triedb := trie.NewDatabase(t.db.backend.Database(), trie.HashDefaults)
+ t, err := trie.New(id, triedb)
if err == nil {
it.t.trie = t
}
diff --git ethereum/go-ethereum/light/trie_test.go taikoxyz/taiko-geth/light/trie_test.go
index ad7d769c84a440ee6fce81783f7eab1e1bdd780d..fe724e9eea563c8ce2ec1595efeca8b24e3e6628 100644
--- ethereum/go-ethereum/light/trie_test.go
+++ taikoxyz/taiko-geth/light/trie_test.go
@@ -50,7 +50,7 @@ if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
}
- gspec.MustCommit(lightdb)
+ gspec.MustCommit(lightdb, trie.NewDatabase(lightdb, trie.HashDefaults))
ctx := context.Background()
odr := &testOdr{sdb: fulldb, ldb: lightdb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig}
head := blockchain.CurrentHeader()
diff --git ethereum/go-ethereum/light/txpool_test.go taikoxyz/taiko-geth/light/txpool_test.go
index 1181e3889e5fbefb4b22cc5e29fac982faa61f99..1eec7bc4277993dd317d92db875e0c3b09b2c193 100644
--- ethereum/go-ethereum/light/txpool_test.go
+++ taikoxyz/taiko-geth/light/txpool_test.go
@@ -30,6 +30,7 @@ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
type testTxRelay struct {
@@ -96,7 +97,7 @@ if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
}
- gspec.MustCommit(ldb)
+ gspec.MustCommit(ldb, trie.NewDatabase(ldb, trie.HashDefaults))
odr := &testOdr{sdb: sdb, ldb: ldb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig}
relay := &testTxRelay{
send: make(chan int, 1),
diff --git ethereum/go-ethereum/log/format.go taikoxyz/taiko-geth/log/format.go
index 6a03013b816c3c726e77feeb48f6e5603a25513e..1adf79c17e684055787a52f6a3b4f3156b964353 100644
--- ethereum/go-ethereum/log/format.go
+++ taikoxyz/taiko-geth/log/format.go
@@ -33,7 +33,15 @@ // PrintOrigins sets or unsets log location (file:line) printing for terminal
// format output.
func PrintOrigins(print bool) {
locationEnabled.Store(print)
+ if print {
+ stackEnabled.Store(true)
+ }
}
+
+// stackEnabled is an atomic flag controlling whether the log handler needs
+// to store the callsite stack. This is needed in case any handler wants to
+// print locations (locationEnabled), use vmodule, or print full stacks (BacktraceAt).
+var stackEnabled atomic.Bool
// locationEnabled is an atomic flag controlling whether the terminal formatter
// should append the log locations too when printing entries.
diff --git ethereum/go-ethereum/log/handler.go taikoxyz/taiko-geth/log/handler.go
index 892cfcc3e1ac5de176d23c7f6753aca55c87fb4d..4a0cf578f6cd23d680019a362868fe49e1c7b5f4 100644
--- ethereum/go-ethereum/log/handler.go
+++ taikoxyz/taiko-geth/log/handler.go
@@ -7,6 +7,7 @@ "net"
"os"
"reflect"
"sync"
+ "sync/atomic"
"github.com/go-stack/stack"
)
@@ -354,3 +355,21 @@
func (m muster) NetHandler(network, addr string, fmtr Format) Handler {
return must(NetHandler(network, addr, fmtr))
}
+
+// swapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+type swapHandler struct {
+ handler atomic.Value
+}
+
+func (h *swapHandler) Log(r *Record) error {
+ return (*h.handler.Load().(*Handler)).Log(r)
+}
+
+func (h *swapHandler) Swap(newHandler Handler) {
+ h.handler.Store(&newHandler)
+}
+
+func (h *swapHandler) Get() Handler {
+ return *h.handler.Load().(*Handler)
+}
diff --git ethereum/go-ethereum/log/handler_glog.go taikoxyz/taiko-geth/log/handler_glog.go
index 6db5f1a4c9ba353a76f7f23187374182ceaec071..afca0808b38a38a199420960a78a973721dd9ee0 100644
--- ethereum/go-ethereum/log/handler_glog.go
+++ taikoxyz/taiko-geth/log/handler_glog.go
@@ -139,7 +139,10 @@
h.patterns = filter
h.siteCache = make(map[uintptr]Lvl)
h.override.Store(len(filter) != 0)
-
+ // Enable location storage (globally)
+ if len(h.patterns) > 0 {
+ stackEnabled.Store(true)
+ }
return nil
}
@@ -172,7 +175,8 @@ defer h.lock.Unlock()
h.location = location
h.backtrace.Store(len(location) > 0)
-
+ // Enable location storage (globally)
+ stackEnabled.Store(true)
return nil
}
diff --git ethereum/go-ethereum/log/handler_go13.go taikoxyz/taiko-geth/log/handler_go13.go
deleted file mode 100644
index 4df694debed86c78da1fbf9e84f45beca29688b9..0000000000000000000000000000000000000000
--- ethereum/go-ethereum/log/handler_go13.go
+++ /dev/null
@@ -1,27 +0,0 @@
-//go:build !go1.4
-// +build !go1.4
-
-package log
-
-import (
- "sync/atomic"
- "unsafe"
-)
-
-// swapHandler wraps another handler that may be swapped out
-// dynamically at runtime in a thread-safe fashion.
-type swapHandler struct {
- handler unsafe.Pointer
-}
-
-func (h *swapHandler) Log(r *Record) error {
- return h.Get().Log(r)
-}
-
-func (h *swapHandler) Get() Handler {
- return *(*Handler)(atomic.LoadPointer(&h.handler))
-}
-
-func (h *swapHandler) Swap(newHandler Handler) {
- atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
-}
diff --git ethereum/go-ethereum/log/handler_go14.go taikoxyz/taiko-geth/log/handler_go14.go
deleted file mode 100644
index d0cb14aa063b2ff6f20e9f1eb1881fe776482c57..0000000000000000000000000000000000000000
--- ethereum/go-ethereum/log/handler_go14.go
+++ /dev/null
@@ -1,24 +0,0 @@
-//go:build go1.4
-// +build go1.4
-
-package log
-
-import "sync/atomic"
-
-// swapHandler wraps another handler that may be swapped out
-// dynamically at runtime in a thread-safe fashion.
-type swapHandler struct {
- handler atomic.Value
-}
-
-func (h *swapHandler) Log(r *Record) error {
- return (*h.handler.Load().(*Handler)).Log(r)
-}
-
-func (h *swapHandler) Swap(newHandler Handler) {
- h.handler.Store(&newHandler)
-}
-
-func (h *swapHandler) Get() Handler {
- return *h.handler.Load().(*Handler)
-}
diff --git ethereum/go-ethereum/log/logger.go taikoxyz/taiko-geth/log/logger.go
index 4e471a22da9a3ac3c524c98478da0450842ea646..42e7e375d0025fd06618b7a216e6e906f2efac07 100644
--- ethereum/go-ethereum/log/logger.go
+++ taikoxyz/taiko-geth/log/logger.go
@@ -177,19 +177,22 @@ h *swapHandler
}
func (l *logger) write(msg string, lvl Lvl, ctx []interface{}, skip int) {
- l.h.Log(&Record{
+ record := &Record{
Time: time.Now(),
Lvl: lvl,
Msg: msg,
Ctx: newContext(l.ctx, ctx),
- Call: stack.Caller(skip),
KeyNames: RecordKeyNames{
Time: timeKey,
Msg: msgKey,
Lvl: lvlKey,
Ctx: ctxKey,
},
- })
+ }
+ if stackEnabled.Load() {
+ record.Call = stack.Caller(skip)
+ }
+ l.h.Log(record)
}
func (l *logger) New(ctx ...interface{}) Logger {
diff --git ethereum/go-ethereum/log/logger_test.go taikoxyz/taiko-geth/log/logger_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2e59b3fdf0b1140e7073a830a6cc01f74e5a0dc1
--- /dev/null
+++ taikoxyz/taiko-geth/log/logger_test.go
@@ -0,0 +1,67 @@
+package log
+
+import (
+ "bytes"
+ "os"
+ "strings"
+ "testing"
+)
+
+// TestLoggingWithTrace checks that if BacktraceAt is set, then the
+// glog handler is capable of spitting out a stacktrace
+func TestLoggingWithTrace(t *testing.T) {
+ defer stackEnabled.Store(stackEnabled.Load())
+ out := new(bytes.Buffer)
+ logger := New()
+ {
+ glog := NewGlogHandler(StreamHandler(out, TerminalFormat(false)))
+ glog.Verbosity(LvlTrace)
+ if err := glog.BacktraceAt("logger_test.go:24"); err != nil {
+ t.Fatal(err)
+ }
+ logger.SetHandler(glog)
+ }
+ logger.Trace("a message", "foo", "bar") // Will be bumped to INFO
+ have := out.String()
+ if !strings.HasPrefix(have, "INFO") {
+ t.Fatalf("backtraceat should bump level to info: %s", have)
+ }
+ // The timestamp is locale-dependent, so we want to trim that off
+ // "INFO [01-01|00:00:00.000] a messag ..." -> "a messag..."
+ have = strings.Split(have, "]")[1]
+ wantPrefix := " a message\n\ngoroutine"
+ if !strings.HasPrefix(have, wantPrefix) {
+ t.Errorf("\nhave: %q\nwant: %q\n", have, wantPrefix)
+ }
+}
+
+// TestLoggingWithVmodule checks that vmodule works.
+func TestLoggingWithVmodule(t *testing.T) {
+ defer stackEnabled.Store(stackEnabled.Load())
+ out := new(bytes.Buffer)
+ logger := New()
+ {
+ glog := NewGlogHandler(StreamHandler(out, TerminalFormat(false)))
+ glog.Verbosity(LvlCrit)
+ logger.SetHandler(glog)
+ logger.Warn("This should not be seen", "ignored", "true")
+ glog.Vmodule("logger_test.go=5")
+ }
+ logger.Trace("a message", "foo", "bar")
+ have := out.String()
+ // The timestamp is locale-dependent, so we want to trim that off
+ // "INFO [01-01|00:00:00.000] a messag ..." -> "a messag..."
+ have = strings.Split(have, "]")[1]
+ want := " a message foo=bar\n"
+ if have != want {
+ t.Errorf("\nhave: %q\nwant: %q\n", have, want)
+ }
+}
+
+func BenchmarkTraceLogging(b *testing.B) {
+ Root().SetHandler(LvlFilterHandler(LvlInfo, StreamHandler(os.Stderr, TerminalFormat(true))))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Trace("a message", "v", i)
+ }
+}
diff --git ethereum/go-ethereum/metrics/counter.go taikoxyz/taiko-geth/metrics/counter.go
index 55e1c59540f62f491e563aa864c73b1c04631eba..cb81599c215aafd1f72a0a6cea090904ac43e8e6 100644
--- ethereum/go-ethereum/metrics/counter.go
+++ taikoxyz/taiko-geth/metrics/counter.go
@@ -4,13 +4,16 @@ import (
"sync/atomic"
)
+type CounterSnapshot interface {
+ Count() int64
+}
+
// Counters hold an int64 value that can be incremented and decremented.
type Counter interface {
Clear()
- Count() int64
Dec(int64)
Inc(int64)
- Snapshot() Counter
+ Snapshot() CounterSnapshot
}
// GetOrRegisterCounter returns an existing Counter or constructs and registers
@@ -38,13 +41,13 @@ func NewCounter() Counter {
if !Enabled {
return NilCounter{}
}
- return &StandardCounter{}
+ return new(StandardCounter)
}
// NewCounterForced constructs a new StandardCounter and returns it no matter if
// the global switch is enabled or not.
func NewCounterForced() Counter {
- return &StandardCounter{}
+ return new(StandardCounter)
}
// NewRegisteredCounter constructs and registers a new StandardCounter.
@@ -70,75 +73,40 @@ r.Register(name, c)
return c
}
-// CounterSnapshot is a read-only copy of another Counter.
-type CounterSnapshot int64
-
-// Clear panics.
-func (CounterSnapshot) Clear() {
- panic("Clear called on a CounterSnapshot")
-}
+// counterSnapshot is a read-only copy of another Counter.
+type counterSnapshot int64
// Count returns the count at the time the snapshot was taken.
-func (c CounterSnapshot) Count() int64 { return int64(c) }
-
-// Dec panics.
-func (CounterSnapshot) Dec(int64) {
- panic("Dec called on a CounterSnapshot")
-}
-
-// Inc panics.
-func (CounterSnapshot) Inc(int64) {
- panic("Inc called on a CounterSnapshot")
-}
-
-// Snapshot returns the snapshot.
-func (c CounterSnapshot) Snapshot() Counter { return c }
+func (c counterSnapshot) Count() int64 { return int64(c) }
// NilCounter is a no-op Counter.
type NilCounter struct{}
-// Clear is a no-op.
-func (NilCounter) Clear() {}
-
-// Count is a no-op.
-func (NilCounter) Count() int64 { return 0 }
-
-// Dec is a no-op.
-func (NilCounter) Dec(i int64) {}
-
-// Inc is a no-op.
-func (NilCounter) Inc(i int64) {}
-
-// Snapshot is a no-op.
-func (NilCounter) Snapshot() Counter { return NilCounter{} }
+func (NilCounter) Clear() {}
+func (NilCounter) Dec(i int64) {}
+func (NilCounter) Inc(i int64) {}
+func (NilCounter) Snapshot() CounterSnapshot { return (*emptySnapshot)(nil) }
// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
-type StandardCounter struct {
- count atomic.Int64
-}
+type StandardCounter atomic.Int64
// Clear sets the counter to zero.
func (c *StandardCounter) Clear() {
- c.count.Store(0)
-}
-
-// Count returns the current count.
-func (c *StandardCounter) Count() int64 {
- return c.count.Load()
+ (*atomic.Int64)(c).Store(0)
}
// Dec decrements the counter by the given amount.
func (c *StandardCounter) Dec(i int64) {
- c.count.Add(-i)
+ (*atomic.Int64)(c).Add(-i)
}
// Inc increments the counter by the given amount.
func (c *StandardCounter) Inc(i int64) {
- c.count.Add(i)
+ (*atomic.Int64)(c).Add(i)
}
// Snapshot returns a read-only copy of the counter.
-func (c *StandardCounter) Snapshot() Counter {
- return CounterSnapshot(c.Count())
+func (c *StandardCounter) Snapshot() CounterSnapshot {
+ return counterSnapshot((*atomic.Int64)(c).Load())
}
diff --git ethereum/go-ethereum/metrics/counter_float64.go taikoxyz/taiko-geth/metrics/counter_float64.go
index d1197bb8e0ae9e7747f1eff36e9ae7ebc9b7210a..15c81494efb819b8200e331f3d52839813df6f5f 100644
--- ethereum/go-ethereum/metrics/counter_float64.go
+++ taikoxyz/taiko-geth/metrics/counter_float64.go
@@ -5,13 +5,16 @@ "math"
"sync/atomic"
)
+type CounterFloat64Snapshot interface {
+ Count() float64
+}
+
// CounterFloat64 holds a float64 value that can be incremented and decremented.
type CounterFloat64 interface {
Clear()
- Count() float64
Dec(float64)
Inc(float64)
- Snapshot() CounterFloat64
+ Snapshot() CounterFloat64Snapshot
}
// GetOrRegisterCounterFloat64 returns an existing CounterFloat64 or constructs and registers
@@ -71,47 +74,19 @@ r.Register(name, c)
return c
}
-// CounterFloat64Snapshot is a read-only copy of another CounterFloat64.
-type CounterFloat64Snapshot float64
-
-// Clear panics.
-func (CounterFloat64Snapshot) Clear() {
- panic("Clear called on a CounterFloat64Snapshot")
-}
+// counterFloat64Snapshot is a read-only copy of another CounterFloat64.
+type counterFloat64Snapshot float64
// Count returns the value at the time the snapshot was taken.
-func (c CounterFloat64Snapshot) Count() float64 { return float64(c) }
-
-// Dec panics.
-func (CounterFloat64Snapshot) Dec(float64) {
- panic("Dec called on a CounterFloat64Snapshot")
-}
-
-// Inc panics.
-func (CounterFloat64Snapshot) Inc(float64) {
- panic("Inc called on a CounterFloat64Snapshot")
-}
-
-// Snapshot returns the snapshot.
-func (c CounterFloat64Snapshot) Snapshot() CounterFloat64 { return c }
+func (c counterFloat64Snapshot) Count() float64 { return float64(c) }
-// NilCounterFloat64 is a no-op CounterFloat64.
type NilCounterFloat64 struct{}
-// Clear is a no-op.
-func (NilCounterFloat64) Clear() {}
-
-// Count is a no-op.
-func (NilCounterFloat64) Count() float64 { return 0.0 }
-
-// Dec is a no-op.
-func (NilCounterFloat64) Dec(i float64) {}
-
-// Inc is a no-op.
-func (NilCounterFloat64) Inc(i float64) {}
-
-// Snapshot is a no-op.
-func (NilCounterFloat64) Snapshot() CounterFloat64 { return NilCounterFloat64{} }
+func (NilCounterFloat64) Clear() {}
+func (NilCounterFloat64) Count() float64 { return 0.0 }
+func (NilCounterFloat64) Dec(i float64) {}
+func (NilCounterFloat64) Inc(i float64) {}
+func (NilCounterFloat64) Snapshot() CounterFloat64Snapshot { return NilCounterFloat64{} }
// StandardCounterFloat64 is the standard implementation of a CounterFloat64 and uses the
// atomic to manage a single float64 value.
@@ -124,11 +99,6 @@ func (c *StandardCounterFloat64) Clear() {
c.floatBits.Store(0)
}
-// Count returns the current value.
-func (c *StandardCounterFloat64) Count() float64 {
- return math.Float64frombits(c.floatBits.Load())
-}
-
// Dec decrements the counter by the given amount.
func (c *StandardCounterFloat64) Dec(v float64) {
atomicAddFloat(&c.floatBits, -v)
@@ -140,8 +110,9 @@ atomicAddFloat(&c.floatBits, v)
}
// Snapshot returns a read-only copy of the counter.
-func (c *StandardCounterFloat64) Snapshot() CounterFloat64 {
- return CounterFloat64Snapshot(c.Count())
+func (c *StandardCounterFloat64) Snapshot() CounterFloat64Snapshot {
+ v := math.Float64frombits(c.floatBits.Load())
+ return counterFloat64Snapshot(v)
}
func atomicAddFloat(fbits *atomic.Uint64, v float64) {
diff --git ethereum/go-ethereum/metrics/counter_float_64_test.go taikoxyz/taiko-geth/metrics/counter_float_64_test.go
index f17aca330cbed85f9ce6a11d9baaafbcabc3a9f4..c21bd3307fa104869b77491b4596797e5ef10698 100644
--- ethereum/go-ethereum/metrics/counter_float_64_test.go
+++ taikoxyz/taiko-geth/metrics/counter_float_64_test.go
@@ -27,7 +27,7 @@ wg.Done()
}()
}
wg.Wait()
- if have, want := c.Count(), 10.0*float64(b.N); have != want {
+ if have, want := c.Snapshot().Count(), 10.0*float64(b.N); have != want {
b.Fatalf("have %f want %f", have, want)
}
}
@@ -36,7 +36,7 @@ func TestCounterFloat64Clear(t *testing.T) {
c := NewCounterFloat64()
c.Inc(1.0)
c.Clear()
- if count := c.Count(); count != 0 {
+ if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@@ -44,7 +44,7 @@
func TestCounterFloat64Dec1(t *testing.T) {
c := NewCounterFloat64()
c.Dec(1.0)
- if count := c.Count(); count != -1.0 {
+ if count := c.Snapshot().Count(); count != -1.0 {
t.Errorf("c.Count(): -1.0 != %v\n", count)
}
}
@@ -52,7 +52,7 @@
func TestCounterFloat64Dec2(t *testing.T) {
c := NewCounterFloat64()
c.Dec(2.0)
- if count := c.Count(); count != -2.0 {
+ if count := c.Snapshot().Count(); count != -2.0 {
t.Errorf("c.Count(): -2.0 != %v\n", count)
}
}
@@ -60,7 +60,7 @@
func TestCounterFloat64Inc1(t *testing.T) {
c := NewCounterFloat64()
c.Inc(1.0)
- if count := c.Count(); count != 1.0 {
+ if count := c.Snapshot().Count(); count != 1.0 {
t.Errorf("c.Count(): 1.0 != %v\n", count)
}
}
@@ -68,7 +68,7 @@
func TestCounterFloat64Inc2(t *testing.T) {
c := NewCounterFloat64()
c.Inc(2.0)
- if count := c.Count(); count != 2.0 {
+ if count := c.Snapshot().Count(); count != 2.0 {
t.Errorf("c.Count(): 2.0 != %v\n", count)
}
}
@@ -85,7 +85,7 @@ }
func TestCounterFloat64Zero(t *testing.T) {
c := NewCounterFloat64()
- if count := c.Count(); count != 0 {
+ if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@@ -93,7 +93,7 @@
func TestGetOrRegisterCounterFloat64(t *testing.T) {
r := NewRegistry()
NewRegisteredCounterFloat64("foo", r).Inc(47.0)
- if c := GetOrRegisterCounterFloat64("foo", r); c.Count() != 47.0 {
+ if c := GetOrRegisterCounterFloat64("foo", r).Snapshot(); c.Count() != 47.0 {
t.Fatal(c)
}
}
diff --git ethereum/go-ethereum/metrics/counter_test.go taikoxyz/taiko-geth/metrics/counter_test.go
index af26ef1548fee722d2f19041a99e3c871a109a2a..1b15b23f215fe13c64c21ef3de34989a8fbea064 100644
--- ethereum/go-ethereum/metrics/counter_test.go
+++ taikoxyz/taiko-geth/metrics/counter_test.go
@@ -14,7 +14,7 @@ func TestCounterClear(t *testing.T) {
c := NewCounter()
c.Inc(1)
c.Clear()
- if count := c.Count(); count != 0 {
+ if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@@ -22,7 +22,7 @@
func TestCounterDec1(t *testing.T) {
c := NewCounter()
c.Dec(1)
- if count := c.Count(); count != -1 {
+ if count := c.Snapshot().Count(); count != -1 {
t.Errorf("c.Count(): -1 != %v\n", count)
}
}
@@ -30,7 +30,7 @@
func TestCounterDec2(t *testing.T) {
c := NewCounter()
c.Dec(2)
- if count := c.Count(); count != -2 {
+ if count := c.Snapshot().Count(); count != -2 {
t.Errorf("c.Count(): -2 != %v\n", count)
}
}
@@ -38,7 +38,7 @@
func TestCounterInc1(t *testing.T) {
c := NewCounter()
c.Inc(1)
- if count := c.Count(); count != 1 {
+ if count := c.Snapshot().Count(); count != 1 {
t.Errorf("c.Count(): 1 != %v\n", count)
}
}
@@ -46,7 +46,7 @@
func TestCounterInc2(t *testing.T) {
c := NewCounter()
c.Inc(2)
- if count := c.Count(); count != 2 {
+ if count := c.Snapshot().Count(); count != 2 {
t.Errorf("c.Count(): 2 != %v\n", count)
}
}
@@ -63,7 +63,7 @@ }
func TestCounterZero(t *testing.T) {
c := NewCounter()
- if count := c.Count(); count != 0 {
+ if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@@ -71,7 +71,7 @@
func TestGetOrRegisterCounter(t *testing.T) {
r := NewRegistry()
NewRegisteredCounter("foo", r).Inc(47)
- if c := GetOrRegisterCounter("foo", r); c.Count() != 47 {
+ if c := GetOrRegisterCounter("foo", r).Snapshot(); c.Count() != 47 {
t.Fatal(c)
}
}
diff --git ethereum/go-ethereum/metrics/doc.go taikoxyz/taiko-geth/metrics/doc.go
deleted file mode 100644
index 13f429c1689d24c765488286e4f1308384388fc0..0000000000000000000000000000000000000000
--- ethereum/go-ethereum/metrics/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package metrics
-
-const epsilon = 0.0000000000000001
-const epsilonPercentile = .00000000001
diff --git ethereum/go-ethereum/metrics/ewma.go taikoxyz/taiko-geth/metrics/ewma.go
index ed95cba19b4f81224ee72bad5cbb6180ad76edf9..1d7a4f00cf4575f9c0f944be5fd560e44869e665 100644
--- ethereum/go-ethereum/metrics/ewma.go
+++ taikoxyz/taiko-geth/metrics/ewma.go
@@ -7,11 +7,14 @@ "sync/atomic"
"time"
)
+type EWMASnapshot interface {
+ Rate() float64
+}
+
// EWMAs continuously calculate an exponentially-weighted moving average
// based on an outside source of clock ticks.
type EWMA interface {
- Rate() float64
- Snapshot() EWMA
+ Snapshot() EWMASnapshot
Tick()
Update(int64)
}
@@ -36,40 +39,19 @@ func NewEWMA15() EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/15))
}
-// EWMASnapshot is a read-only copy of another EWMA.
-type EWMASnapshot float64
+// ewmaSnapshot is a read-only copy of another EWMA.
+type ewmaSnapshot float64
// Rate returns the rate of events per second at the time the snapshot was
// taken.
-func (a EWMASnapshot) Rate() float64 { return float64(a) }
-
-// Snapshot returns the snapshot.
-func (a EWMASnapshot) Snapshot() EWMA { return a }
-
-// Tick panics.
-func (EWMASnapshot) Tick() {
- panic("Tick called on an EWMASnapshot")
-}
-
-// Update panics.
-func (EWMASnapshot) Update(int64) {
- panic("Update called on an EWMASnapshot")
-}
+func (a ewmaSnapshot) Rate() float64 { return float64(a) }
// NilEWMA is a no-op EWMA.
type NilEWMA struct{}
-// Rate is a no-op.
-func (NilEWMA) Rate() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
-
-// Tick is a no-op.
-func (NilEWMA) Tick() {}
-
-// Update is a no-op.
-func (NilEWMA) Update(n int64) {}
+func (NilEWMA) Snapshot() EWMASnapshot { return (*emptySnapshot)(nil) }
+func (NilEWMA) Tick() {}
+func (NilEWMA) Update(n int64) {}
// StandardEWMA is the standard implementation of an EWMA and tracks the number
// of uncounted events and processes them on each tick. It uses the
@@ -77,37 +59,50 @@ // sync/atomic package to manage uncounted events.
type StandardEWMA struct {
uncounted atomic.Int64
alpha float64
- rate float64
- init bool
+ rate atomic.Uint64
+ init atomic.Bool
mutex sync.Mutex
}
-// Rate returns the moving average rate of events per second.
-func (a *StandardEWMA) Rate() float64 {
- a.mutex.Lock()
- defer a.mutex.Unlock()
- return a.rate * float64(time.Second)
-}
-
// Snapshot returns a read-only copy of the EWMA.
-func (a *StandardEWMA) Snapshot() EWMA {
- return EWMASnapshot(a.Rate())
+func (a *StandardEWMA) Snapshot() EWMASnapshot {
+ r := math.Float64frombits(a.rate.Load()) * float64(time.Second)
+ return ewmaSnapshot(r)
}
// Tick ticks the clock to update the moving average. It assumes it is called
// every five seconds.
func (a *StandardEWMA) Tick() {
- count := a.uncounted.Load()
- a.uncounted.Add(-count)
- instantRate := float64(count) / float64(5*time.Second)
+ // Optimization to avoid mutex locking in the hot-path.
+ if a.init.Load() {
+ a.updateRate(a.fetchInstantRate())
+ return
+ }
+ // Slow-path: this is only needed on the first Tick() and preserves transactional updating
+ // of init and rate in the else block. The first conditional is needed below because
+ // a different thread could have set a.init = 1 between the time of the first atomic load and when
+ // the lock was acquired.
a.mutex.Lock()
- defer a.mutex.Unlock()
- if a.init {
- a.rate += a.alpha * (instantRate - a.rate)
+ if a.init.Load() {
+ // The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section
+ // but again, this section is only invoked on the first successful Tick() operation.
+ a.updateRate(a.fetchInstantRate())
} else {
- a.init = true
- a.rate = instantRate
+ a.init.Store(true)
+ a.rate.Store(math.Float64bits(a.fetchInstantRate()))
}
+ a.mutex.Unlock()
+}
+
+func (a *StandardEWMA) fetchInstantRate() float64 {
+ count := a.uncounted.Swap(0)
+ return float64(count) / float64(5*time.Second)
+}
+
+func (a *StandardEWMA) updateRate(instantRate float64) {
+ currentRate := math.Float64frombits(a.rate.Load())
+ currentRate += a.alpha * (instantRate - currentRate)
+ a.rate.Store(math.Float64bits(currentRate))
}
// Update adds n uncounted events.
diff --git ethereum/go-ethereum/metrics/ewma_test.go taikoxyz/taiko-geth/metrics/ewma_test.go
index 5b244191616e7cd8539175d0dd1e9f6e6a0caef1..9a91b43db81a4b499a5660477cb6b76a8b862701 100644
--- ethereum/go-ethereum/metrics/ewma_test.go
+++ taikoxyz/taiko-geth/metrics/ewma_test.go
@@ -5,6 +5,8 @@ "math"
"testing"
)
+const epsilon = 0.0000000000000001
+
func BenchmarkEWMA(b *testing.B) {
a := NewEWMA1()
b.ResetTimer()
@@ -14,72 +16,33 @@ a.Tick()
}
}
+func BenchmarkEWMAParallel(b *testing.B) {
+ a := NewEWMA1()
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ a.Update(1)
+ a.Tick()
+ }
+ })
+}
+
func TestEWMA1(t *testing.T) {
a := NewEWMA1()
a.Update(3)
a.Tick()
- if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
- t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.22072766470286553-rate) > epsilon {
- t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.08120116994196772-rate) > epsilon {
- t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.029872241020718428-rate) > epsilon {
- t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.01098938333324054-rate) > epsilon {
- t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.004042768199451294-rate) > epsilon {
- t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.0014872513059998212-rate) > epsilon {
- t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.0005471291793327122-rate) > epsilon {
- t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.00020127757674150815-rate) > epsilon {
- t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(7.404588245200814e-05-rate) > epsilon {
- t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(2.7239957857491083e-05-rate) > epsilon {
- t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(1.0021020474147462e-05-rate) > epsilon {
- t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(3.6865274119969525e-06-rate) > epsilon {
- t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(1.3561976441886433e-06-rate) > epsilon {
- t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(4.989172314621449e-07-rate) > epsilon {
- t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(1.8354139230109722e-07-rate) > epsilon {
- t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate)
+ for i, want := range []float64{0.6,
+ 0.22072766470286553, 0.08120116994196772, 0.029872241020718428,
+ 0.01098938333324054, 0.004042768199451294, 0.0014872513059998212,
+ 0.0005471291793327122, 0.00020127757674150815, 7.404588245200814e-05,
+ 2.7239957857491083e-05, 1.0021020474147462e-05, 3.6865274119969525e-06,
+ 1.3561976441886433e-06, 4.989172314621449e-07, 1.8354139230109722e-07,
+ } {
+ if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon {
+ t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate)
+ }
+ elapseMinute(a)
}
}
@@ -87,68 +50,17 @@ func TestEWMA5(t *testing.T) {
a := NewEWMA5()
a.Update(3)
a.Tick()
- if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
- t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.49123845184678905-rate) > epsilon {
- t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.4021920276213837-rate) > epsilon {
- t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.32928698165641596-rate) > epsilon {
- t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.269597378470333-rate) > epsilon {
- t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.2207276647028654-rate) > epsilon {
- t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.18071652714732128-rate) > epsilon {
- t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.14795817836496392-rate) > epsilon {
- t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.12113791079679326-rate) > epsilon {
- t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.09917933293295193-rate) > epsilon {
- t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.08120116994196763-rate) > epsilon {
- t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.06648189501740036-rate) > epsilon {
- t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.05443077197364752-rate) > epsilon {
- t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.04456414692860035-rate) > epsilon {
- t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.03648603757513079-rate) > epsilon {
- t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.0298722410207183831020718428-rate) > epsilon {
- t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate)
+ for i, want := range []float64{
+ 0.6, 0.49123845184678905, 0.4021920276213837, 0.32928698165641596,
+ 0.269597378470333, 0.2207276647028654, 0.18071652714732128,
+ 0.14795817836496392, 0.12113791079679326, 0.09917933293295193,
+ 0.08120116994196763, 0.06648189501740036, 0.05443077197364752,
+ 0.04456414692860035, 0.03648603757513079, 0.0298722410207183831020718428,
+ } {
+ if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon {
+ t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate)
+ }
+ elapseMinute(a)
}
}
@@ -156,68 +68,17 @@ func TestEWMA15(t *testing.T) {
a := NewEWMA15()
a.Update(3)
a.Tick()
- if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
- t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.5613041910189706-rate) > epsilon {
- t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.5251039914257684-rate) > epsilon {
- t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.4912384518467888184678905-rate) > epsilon {
- t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.459557003018789-rate) > epsilon {
- t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.4299187863442732-rate) > epsilon {
- t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.4021920276213831-rate) > epsilon {
- t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.37625345116383313-rate) > epsilon {
- t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.3519877317060185-rate) > epsilon {
- t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.3292869816564153165641596-rate) > epsilon {
- t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.3080502714195546-rate) > epsilon {
- t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.2881831806538789-rate) > epsilon {
- t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.26959737847033216-rate) > epsilon {
- t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.2522102307052083-rate) > epsilon {
- t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.23594443252115815-rate) > epsilon {
- t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.2207276647028646247028654470286553-rate) > epsilon {
- t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate)
+ for i, want := range []float64{
+ 0.6, 0.5613041910189706, 0.5251039914257684, 0.4912384518467888184678905,
+ 0.459557003018789, 0.4299187863442732, 0.4021920276213831,
+ 0.37625345116383313, 0.3519877317060185, 0.3292869816564153165641596,
+ 0.3080502714195546, 0.2881831806538789, 0.26959737847033216,
+ 0.2522102307052083, 0.23594443252115815, 0.2207276647028646247028654470286553,
+ } {
+ if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon {
+ t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate)
+ }
+ elapseMinute(a)
}
}
diff --git ethereum/go-ethereum/metrics/exp/exp.go taikoxyz/taiko-geth/metrics/exp/exp.go
index 2b04eeab271f96167b8d95e858e79d54cfed49da..7e3f82a075fc3e384478350fae012dc4d128297c 100644
--- ethereum/go-ethereum/metrics/exp/exp.go
+++ taikoxyz/taiko-geth/metrics/exp/exp.go
@@ -95,24 +95,42 @@ exp.expvarLock.Unlock()
return v
}
-func (exp *exp) publishCounter(name string, metric metrics.Counter) {
+func (exp *exp) getInfo(name string) *expvar.String {
+ var v *expvar.String
+ exp.expvarLock.Lock()
+ p := expvar.Get(name)
+ if p != nil {
+ v = p.(*expvar.String)
+ } else {
+ v = new(expvar.String)
+ expvar.Publish(name, v)
+ }
+ exp.expvarLock.Unlock()
+ return v
+}
+
+func (exp *exp) publishCounter(name string, metric metrics.CounterSnapshot) {
v := exp.getInt(name)
v.Set(metric.Count())
}
-func (exp *exp) publishCounterFloat64(name string, metric metrics.CounterFloat64) {
+func (exp *exp) publishCounterFloat64(name string, metric metrics.CounterFloat64Snapshot) {
v := exp.getFloat(name)
v.Set(metric.Count())
}
-func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
+func (exp *exp) publishGauge(name string, metric metrics.GaugeSnapshot) {
v := exp.getInt(name)
v.Set(metric.Value())
}
-func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
+func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64Snapshot) {
exp.getFloat(name).Set(metric.Value())
}
+func (exp *exp) publishGaugeInfo(name string, metric metrics.GaugeInfoSnapshot) {
+ exp.getInfo(name).Set(metric.Value().String())
+}
+
func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
@@ -158,26 +176,28 @@ }
func (exp *exp) publishResettingTimer(name string, metric metrics.ResettingTimer) {
t := metric.Snapshot()
- ps := t.Percentiles([]float64{50, 75, 95, 99})
- exp.getInt(name + ".count").Set(int64(len(t.Values())))
+ ps := t.Percentiles([]float64{0.50, 0.75, 0.95, 0.99})
+ exp.getInt(name + ".count").Set(int64(t.Count()))
exp.getFloat(name + ".mean").Set(t.Mean())
- exp.getInt(name + ".50-percentile").Set(ps[0])
- exp.getInt(name + ".75-percentile").Set(ps[1])
- exp.getInt(name + ".95-percentile").Set(ps[2])
- exp.getInt(name + ".99-percentile").Set(ps[3])
+ exp.getFloat(name + ".50-percentile").Set(ps[0])
+ exp.getFloat(name + ".75-percentile").Set(ps[1])
+ exp.getFloat(name + ".95-percentile").Set(ps[2])
+ exp.getFloat(name + ".99-percentile").Set(ps[3])
}
func (exp *exp) syncToExpvar() {
exp.registry.Each(func(name string, i interface{}) {
switch i := i.(type) {
case metrics.Counter:
- exp.publishCounter(name, i)
+ exp.publishCounter(name, i.Snapshot())
case metrics.CounterFloat64:
- exp.publishCounterFloat64(name, i)
+ exp.publishCounterFloat64(name, i.Snapshot())
case metrics.Gauge:
- exp.publishGauge(name, i)
+ exp.publishGauge(name, i.Snapshot())
case metrics.GaugeFloat64:
- exp.publishGaugeFloat64(name, i)
+ exp.publishGaugeFloat64(name, i.Snapshot())
+ case metrics.GaugeInfo:
+ exp.publishGaugeInfo(name, i.Snapshot())
case metrics.Histogram:
exp.publishHistogram(name, i)
case metrics.Meter:
diff --git ethereum/go-ethereum/metrics/gauge.go taikoxyz/taiko-geth/metrics/gauge.go
index 81137d7f7c5ea3ba77c1a83f8c64f579a2ea7a4e..68f8f11abcd74bc95652b840552612d3702e6c42 100644
--- ethereum/go-ethereum/metrics/gauge.go
+++ taikoxyz/taiko-geth/metrics/gauge.go
@@ -2,13 +2,18 @@ package metrics
import "sync/atomic"
+// GaugeSnapshot contains a readonly int64.
+type GaugeSnapshot interface {
+ Value() int64
+}
+
// Gauges hold an int64 value that can be set arbitrarily.
type Gauge interface {
- Snapshot() Gauge
+ Snapshot() GaugeSnapshot
Update(int64)
+ UpdateIfGt(int64)
Dec(int64)
Inc(int64)
- Value() int64
}
// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
@@ -38,65 +43,20 @@ r.Register(name, c)
return c
}
-// NewFunctionalGauge constructs a new FunctionalGauge.
-func NewFunctionalGauge(f func() int64) Gauge {
- if !Enabled {
- return NilGauge{}
- }
- return &FunctionalGauge{value: f}
-}
-
-// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
-func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
- c := NewFunctionalGauge(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeSnapshot is a read-only copy of another Gauge.
-type GaugeSnapshot int64
-
-// Snapshot returns the snapshot.
-func (g GaugeSnapshot) Snapshot() Gauge { return g }
-
-// Update panics.
-func (GaugeSnapshot) Update(int64) {
- panic("Update called on a GaugeSnapshot")
-}
-
-// Dec panics.
-func (GaugeSnapshot) Dec(int64) {
- panic("Dec called on a GaugeSnapshot")
-}
-
-// Inc panics.
-func (GaugeSnapshot) Inc(int64) {
- panic("Inc called on a GaugeSnapshot")
-}
+// gaugeSnapshot is a read-only copy of another Gauge.
+type gaugeSnapshot int64
// Value returns the value at the time the snapshot was taken.
-func (g GaugeSnapshot) Value() int64 { return int64(g) }
+func (g gaugeSnapshot) Value() int64 { return int64(g) }
// NilGauge is a no-op Gauge.
type NilGauge struct{}
-// Snapshot is a no-op.
-func (NilGauge) Snapshot() Gauge { return NilGauge{} }
-
-// Update is a no-op.
-func (NilGauge) Update(v int64) {}
-
-// Dec is a no-op.
-func (NilGauge) Dec(i int64) {}
-
-// Inc is a no-op.
-func (NilGauge) Inc(i int64) {}
-
-// Value is a no-op.
-func (NilGauge) Value() int64 { return 0 }
+func (NilGauge) Snapshot() GaugeSnapshot { return (*emptySnapshot)(nil) }
+func (NilGauge) Update(v int64) {}
+func (NilGauge) UpdateIfGt(v int64) {}
+func (NilGauge) Dec(i int64) {}
+func (NilGauge) Inc(i int64) {}
// StandardGauge is the standard implementation of a Gauge and uses the
// sync/atomic package to manage a single int64 value.
@@ -105,8 +65,8 @@ value atomic.Int64
}
// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGauge) Snapshot() Gauge {
- return GaugeSnapshot(g.Value())
+func (g *StandardGauge) Snapshot() GaugeSnapshot {
+ return gaugeSnapshot(g.value.Load())
}
// Update updates the gauge's value.
@@ -114,9 +74,17 @@ func (g *StandardGauge) Update(v int64) {
g.value.Store(v)
}
-// Value returns the gauge's current value.
-func (g *StandardGauge) Value() int64 {
- return g.value.Load()
+// UpdateIfGt updates the gauge's value if v is larger than the current value.
+func (g *StandardGauge) UpdateIfGt(v int64) {
+ for {
+ exist := g.value.Load()
+ if exist >= v {
+ break
+ }
+ if g.value.CompareAndSwap(exist, v) {
+ break
+ }
+ }
}
// Dec decrements the gauge's current value by the given amount.
@@ -128,31 +96,3 @@ // Inc increments the gauge's current value by the given amount.
func (g *StandardGauge) Inc(i int64) {
g.value.Add(i)
}
-
-// FunctionalGauge returns value from given function
-type FunctionalGauge struct {
- value func() int64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGauge) Value() int64 {
- return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGauge) Update(int64) {
- panic("Update called on a FunctionalGauge")
-}
-
-// Dec panics.
-func (FunctionalGauge) Dec(int64) {
- panic("Dec called on a FunctionalGauge")
-}
-
-// Inc panics.
-func (FunctionalGauge) Inc(int64) {
- panic("Inc called on a FunctionalGauge")
-}
diff --git ethereum/go-ethereum/metrics/gauge_float64.go taikoxyz/taiko-geth/metrics/gauge_float64.go
index 237ff8036e01445b8adfb73c221aba43537e1bb2..967f2bc60e5cf7c3773f04b7d70d8596848ae308 100644
--- ethereum/go-ethereum/metrics/gauge_float64.go
+++ taikoxyz/taiko-geth/metrics/gauge_float64.go
@@ -5,11 +5,14 @@ "math"
"sync/atomic"
)
-// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64Snapshot interface {
+ Value() float64
+}
+
+// GaugeFloat64 holds a float64 value that can be set arbitrarily.
type GaugeFloat64 interface {
- Snapshot() GaugeFloat64
+ Snapshot() GaugeFloat64Snapshot
Update(float64)
- Value() float64
}
// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
@@ -39,49 +42,18 @@ r.Register(name, c)
return c
}
-// NewFunctionalGauge constructs a new FunctionalGauge.
-func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
- if !Enabled {
- return NilGaugeFloat64{}
- }
- return &FunctionalGaugeFloat64{value: f}
-}
-
-// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
-func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
- c := NewFunctionalGaugeFloat64(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
-type GaugeFloat64Snapshot float64
-
-// Snapshot returns the snapshot.
-func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
-
-// Update panics.
-func (GaugeFloat64Snapshot) Update(float64) {
- panic("Update called on a GaugeFloat64Snapshot")
-}
+// gaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type gaugeFloat64Snapshot float64
// Value returns the value at the time the snapshot was taken.
-func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+func (g gaugeFloat64Snapshot) Value() float64 { return float64(g) }
// NilGauge is a no-op Gauge.
type NilGaugeFloat64 struct{}
-// Snapshot is a no-op.
-func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
-
-// Update is a no-op.
-func (NilGaugeFloat64) Update(v float64) {}
-
-// Value is a no-op.
-func (NilGaugeFloat64) Value() float64 { return 0.0 }
+func (NilGaugeFloat64) Snapshot() GaugeFloat64Snapshot { return NilGaugeFloat64{} }
+func (NilGaugeFloat64) Update(v float64) {}
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
// atomic to manage a single float64 value.
@@ -90,34 +62,12 @@ floatBits atomic.Uint64
}
// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
- return GaugeFloat64Snapshot(g.Value())
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64Snapshot {
+ v := math.Float64frombits(g.floatBits.Load())
+ return gaugeFloat64Snapshot(v)
}
// Update updates the gauge's value.
func (g *StandardGaugeFloat64) Update(v float64) {
g.floatBits.Store(math.Float64bits(v))
}
-
-// Value returns the gauge's current value.
-func (g *StandardGaugeFloat64) Value() float64 {
- return math.Float64frombits(g.floatBits.Load())
-}
-
-// FunctionalGaugeFloat64 returns value from given function
-type FunctionalGaugeFloat64 struct {
- value func() float64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGaugeFloat64) Value() float64 {
- return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGaugeFloat64) Update(float64) {
- panic("Update called on a FunctionalGaugeFloat64")
-}
diff --git ethereum/go-ethereum/metrics/gauge_float64_test.go taikoxyz/taiko-geth/metrics/gauge_float64_test.go
index 647d09000935bb47a181095d46b24d0db8ee8d93..f0ac7ea5e7be0ad1f550ba05098f5916bbf6e341 100644
--- ethereum/go-ethereum/metrics/gauge_float64_test.go
+++ taikoxyz/taiko-geth/metrics/gauge_float64_test.go
@@ -26,16 +26,8 @@ wg.Done()
}()
}
wg.Wait()
- if have, want := c.Value(), float64(b.N-1); have != want {
+ if have, want := c.Snapshot().Value(), float64(b.N-1); have != want {
b.Fatalf("have %f want %f", have, want)
- }
-}
-
-func TestGaugeFloat64(t *testing.T) {
- g := NewGaugeFloat64()
- g.Update(47.0)
- if v := g.Value(); 47.0 != v {
- t.Errorf("g.Value(): 47.0 != %v\n", v)
}
}
@@ -53,28 +45,7 @@ func TestGetOrRegisterGaugeFloat64(t *testing.T) {
r := NewRegistry()
NewRegisteredGaugeFloat64("foo", r).Update(47.0)
t.Logf("registry: %v", r)
- if g := GetOrRegisterGaugeFloat64("foo", r); 47.0 != g.Value() {
- t.Fatal(g)
- }
-}
-
-func TestFunctionalGaugeFloat64(t *testing.T) {
- var counter float64
- fg := NewFunctionalGaugeFloat64(func() float64 {
- counter++
- return counter
- })
- fg.Value()
- fg.Value()
- if counter != 2 {
- t.Error("counter != 2")
- }
-}
-
-func TestGetOrRegisterFunctionalGaugeFloat64(t *testing.T) {
- r := NewRegistry()
- NewRegisteredFunctionalGaugeFloat64("foo", r, func() float64 { return 47 })
- if g := GetOrRegisterGaugeFloat64("foo", r); g.Value() != 47 {
+ if g := GetOrRegisterGaugeFloat64("foo", r).Snapshot(); 47.0 != g.Value() {
t.Fatal(g)
}
}
diff --git ethereum/go-ethereum/metrics/gauge_info.go taikoxyz/taiko-geth/metrics/gauge_info.go
new file mode 100644
index 0000000000000000000000000000000000000000..c44b2d85f3ad6565da374ac73c09b18131510da8
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/gauge_info.go
@@ -0,0 +1,84 @@
+package metrics
+
+import (
+ "encoding/json"
+ "sync"
+)
+
+type GaugeInfoSnapshot interface {
+ Value() GaugeInfoValue
+}
+
+// GaugeInfos hold a GaugeInfoValue value that can be set arbitrarily.
+type GaugeInfo interface {
+ Update(GaugeInfoValue)
+ Snapshot() GaugeInfoSnapshot
+}
+
+// GaugeInfoValue is a mapping of keys to values
+type GaugeInfoValue map[string]string
+
+func (val GaugeInfoValue) String() string {
+ data, _ := json.Marshal(val)
+ return string(data)
+}
+
+// GetOrRegisterGaugeInfo returns an existing GaugeInfo or constructs and registers a
+// new StandardGaugeInfo.
+func GetOrRegisterGaugeInfo(name string, r Registry) GaugeInfo {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGaugeInfo()).(GaugeInfo)
+}
+
+// NewGaugeInfo constructs a new StandardGaugeInfo.
+func NewGaugeInfo() GaugeInfo {
+ if !Enabled {
+ return NilGaugeInfo{}
+ }
+ return &StandardGaugeInfo{
+ value: GaugeInfoValue{},
+ }
+}
+
+// NewRegisteredGaugeInfo constructs and registers a new StandardGaugeInfo.
+func NewRegisteredGaugeInfo(name string, r Registry) GaugeInfo {
+ c := NewGaugeInfo()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// gaugeInfoSnapshot is a read-only copy of another GaugeInfo.
+type gaugeInfoSnapshot GaugeInfoValue
+
+// Value returns the value at the time the snapshot was taken.
+func (g gaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) }
+
+type NilGaugeInfo struct{}
+
+func (NilGaugeInfo) Snapshot() GaugeInfoSnapshot { return NilGaugeInfo{} }
+func (NilGaugeInfo) Update(v GaugeInfoValue) {}
+func (NilGaugeInfo) Value() GaugeInfoValue { return GaugeInfoValue{} }
+
+// StandardGaugeInfo is the standard implementation of a GaugeInfo and uses
+// sync.Mutex to manage a single GaugeInfoValue.
+type StandardGaugeInfo struct {
+ mutex sync.Mutex
+ value GaugeInfoValue
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeInfo) Snapshot() GaugeInfoSnapshot {
+ return gaugeInfoSnapshot(g.value)
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeInfo) Update(v GaugeInfoValue) {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ g.value = v
+}
diff --git ethereum/go-ethereum/metrics/gauge_info_test.go taikoxyz/taiko-geth/metrics/gauge_info_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..319afbf92e8f3284050c133a51e7cd9984a806b0
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/gauge_info_test.go
@@ -0,0 +1,36 @@
+package metrics
+
+import (
+ "testing"
+)
+
+func TestGaugeInfoJsonString(t *testing.T) {
+ g := NewGaugeInfo()
+ g.Update(GaugeInfoValue{
+ "chain_id": "5",
+ "anotherKey": "any_string_value",
+ "third_key": "anything",
+ },
+ )
+ want := `{"anotherKey":"any_string_value","chain_id":"5","third_key":"anything"}`
+
+ original := g.Snapshot()
+ g.Update(GaugeInfoValue{"value": "updated"})
+
+ if have := original.Value().String(); have != want {
+ t.Errorf("\nhave: %v\nwant: %v\n", have, want)
+ }
+ if have, want := g.Snapshot().Value().String(), `{"value":"updated"}`; have != want {
+ t.Errorf("\nhave: %v\nwant: %v\n", have, want)
+ }
+}
+
+func TestGetOrRegisterGaugeInfo(t *testing.T) {
+ r := NewRegistry()
+ NewRegisteredGaugeInfo("foo", r).Update(
+ GaugeInfoValue{"chain_id": "5"})
+ g := GetOrRegisterGaugeInfo("foo", r).Snapshot()
+ if have, want := g.Value().String(), `{"chain_id":"5"}`; have != want {
+ t.Errorf("have\n%v\nwant\n%v\n", have, want)
+ }
+}
diff --git ethereum/go-ethereum/metrics/gauge_test.go taikoxyz/taiko-geth/metrics/gauge_test.go
index a98fe985d8c280696dec6151e59253c46551efbf..f2ba930bc465993cf2aea611197eae9deccc0bcc 100644
--- ethereum/go-ethereum/metrics/gauge_test.go
+++ taikoxyz/taiko-geth/metrics/gauge_test.go
@@ -1,7 +1,6 @@
package metrics
import (
- "fmt"
"testing"
)
@@ -13,14 +12,6 @@ g.Update(int64(i))
}
}
-func TestGauge(t *testing.T) {
- g := NewGauge()
- g.Update(int64(47))
- if v := g.Value(); v != 47 {
- t.Errorf("g.Value(): 47 != %v\n", v)
- }
-}
-
func TestGaugeSnapshot(t *testing.T) {
g := NewGauge()
g.Update(int64(47))
@@ -34,35 +25,7 @@
func TestGetOrRegisterGauge(t *testing.T) {
r := NewRegistry()
NewRegisteredGauge("foo", r).Update(47)
- if g := GetOrRegisterGauge("foo", r); g.Value() != 47 {
+ if g := GetOrRegisterGauge("foo", r); g.Snapshot().Value() != 47 {
t.Fatal(g)
}
}
-
-func TestFunctionalGauge(t *testing.T) {
- var counter int64
- fg := NewFunctionalGauge(func() int64 {
- counter++
- return counter
- })
- fg.Value()
- fg.Value()
- if counter != 2 {
- t.Error("counter != 2")
- }
-}
-
-func TestGetOrRegisterFunctionalGauge(t *testing.T) {
- r := NewRegistry()
- NewRegisteredFunctionalGauge("foo", r, func() int64 { return 47 })
- if g := GetOrRegisterGauge("foo", r); g.Value() != 47 {
- t.Fatal(g)
- }
-}
-
-func ExampleGetOrRegisterGauge() {
- m := "server.bytes_sent"
- g := GetOrRegisterGauge(m, nil)
- g.Update(47)
- fmt.Println(g.Value()) // Output: 47
-}
diff --git ethereum/go-ethereum/metrics/graphite.go taikoxyz/taiko-geth/metrics/graphite.go
index 29f72b0c4181109f4a4c5b98d707b9f3db8986a2..aba752e0ed5eae6ef0e5aae4d44042d4e664ea86 100644
--- ethereum/go-ethereum/metrics/graphite.go
+++ taikoxyz/taiko-geth/metrics/graphite.go
@@ -66,13 +66,15 @@ w := bufio.NewWriter(conn)
c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Snapshot().Count(), now)
case CounterFloat64:
- fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Count(), now)
+ fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Snapshot().Count(), now)
case Gauge:
- fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+ fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Snapshot().Value(), now)
case GaugeFloat64:
- fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+ fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Snapshot().Value(), now)
+ case GaugeInfo:
+ fmt.Fprintf(w, "%s.%s.value %s %d\n", c.Prefix, name, metric.Snapshot().Value().String(), now)
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles(c.Percentiles)
diff --git ethereum/go-ethereum/metrics/histogram.go taikoxyz/taiko-geth/metrics/histogram.go
index 2c54ce8b4063e06109bd07868ed54b7e8dbd2aa3..44de588bc1dc8b03d5c4221eff2de593627d3a81 100644
--- ethereum/go-ethereum/metrics/histogram.go
+++ taikoxyz/taiko-geth/metrics/histogram.go
@@ -1,20 +1,14 @@
package metrics
+type HistogramSnapshot interface {
+ SampleSnapshot
+}
+
// Histograms calculate distribution statistics from a series of int64 values.
type Histogram interface {
Clear()
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Sample() Sample
- Snapshot() Histogram
- StdDev() float64
- Sum() int64
Update(int64)
- Variance() float64
+ Snapshot() HistogramSnapshot
}
// GetOrRegisterHistogram returns an existing Histogram or constructs and
@@ -54,108 +48,12 @@ r.Register(name, c)
return c
}
-// HistogramSnapshot is a read-only copy of another Histogram.
-type HistogramSnapshot struct {
- sample *SampleSnapshot
-}
-
-// Clear panics.
-func (*HistogramSnapshot) Clear() {
- panic("Clear called on a HistogramSnapshot")
-}
-
-// Count returns the number of samples recorded at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample at the time the snapshot
-// was taken.
-func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the sample
-// at the time the snapshot was taken.
-func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *HistogramSnapshot) Sample() Sample { return h.sample }
-
-// Snapshot returns the snapshot.
-func (h *HistogramSnapshot) Snapshot() Histogram { return h }
-
-// StdDev returns the standard deviation of the values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample at the time the snapshot was taken.
-func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
-
-// Update panics.
-func (*HistogramSnapshot) Update(int64) {
- panic("Update called on a HistogramSnapshot")
-}
-
-// Variance returns the variance of inputs at the time the snapshot was taken.
-func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
-
// NilHistogram is a no-op Histogram.
type NilHistogram struct{}
-// Clear is a no-op.
-func (NilHistogram) Clear() {}
-
-// Count is a no-op.
-func (NilHistogram) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilHistogram) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilHistogram) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilHistogram) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilHistogram) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Sample is a no-op.
-func (NilHistogram) Sample() Sample { return NilSample{} }
-
-// Snapshot is a no-op.
-func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
-
-// StdDev is a no-op.
-func (NilHistogram) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilHistogram) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilHistogram) Update(v int64) {}
-
-// Variance is a no-op.
-func (NilHistogram) Variance() float64 { return 0.0 }
+func (NilHistogram) Clear() {}
+func (NilHistogram) Snapshot() HistogramSnapshot { return (*emptySnapshot)(nil) }
+func (NilHistogram) Update(v int64) {}
// StandardHistogram is the standard implementation of a Histogram and uses a
// Sample to bound its memory use.
@@ -166,46 +64,10 @@
// Clear clears the histogram and its sample.
func (h *StandardHistogram) Clear() { h.sample.Clear() }
-// Count returns the number of samples recorded since the histogram was last
-// cleared.
-func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample.
-func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample.
-func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample.
-func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (h *StandardHistogram) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *StandardHistogram) Sample() Sample { return h.sample }
-
// Snapshot returns a read-only copy of the histogram.
-func (h *StandardHistogram) Snapshot() Histogram {
- return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+func (h *StandardHistogram) Snapshot() HistogramSnapshot {
+ return h.sample.Snapshot()
}
-// StdDev returns the standard deviation of the values in the sample.
-func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample.
-func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
-
// Update samples a new value.
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
-
-// Variance returns the variance of the values in the sample.
-func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git ethereum/go-ethereum/metrics/histogram_test.go taikoxyz/taiko-geth/metrics/histogram_test.go
index 7c9f42fcec962904eb5c53b1eb3eedc5c3d0e887..22fc5468b0b5dc1697d4650b384448f6229046b5 100644
--- ethereum/go-ethereum/metrics/histogram_test.go
+++ taikoxyz/taiko-geth/metrics/histogram_test.go
@@ -14,7 +14,7 @@ func TestGetOrRegisterHistogram(t *testing.T) {
r := NewRegistry()
s := NewUniformSample(100)
NewRegisteredHistogram("foo", r, s).Update(47)
- if h := GetOrRegisterHistogram("foo", r, s); h.Count() != 1 {
+ if h := GetOrRegisterHistogram("foo", r, s).Snapshot(); h.Count() != 1 {
t.Fatal(h)
}
}
@@ -24,11 +24,11 @@ h := NewHistogram(NewUniformSample(100000))
for i := 1; i <= 10000; i++ {
h.Update(int64(i))
}
- testHistogram10000(t, h)
+ testHistogram10000(t, h.Snapshot())
}
func TestHistogramEmpty(t *testing.T) {
- h := NewHistogram(NewUniformSample(100))
+ h := NewHistogram(NewUniformSample(100)).Snapshot()
if count := h.Count(); count != 0 {
t.Errorf("h.Count(): 0 != %v\n", count)
}
@@ -66,7 +66,7 @@ h.Update(0)
testHistogram10000(t, snapshot)
}
-func testHistogram10000(t *testing.T, h Histogram) {
+func testHistogram10000(t *testing.T, h HistogramSnapshot) {
if count := h.Count(); count != 10000 {
t.Errorf("h.Count(): 10000 != %v\n", count)
}
diff --git ethereum/go-ethereum/metrics/inactive.go taikoxyz/taiko-geth/metrics/inactive.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f47f0210af30ddc0fe5c550901e33cdc39b3dea
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/inactive.go
@@ -0,0 +1,48 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package metrics
+
+// compile-time checks that interfaces are implemented.
+var (
+ _ SampleSnapshot = (*emptySnapshot)(nil)
+ _ HistogramSnapshot = (*emptySnapshot)(nil)
+ _ CounterSnapshot = (*emptySnapshot)(nil)
+ _ GaugeSnapshot = (*emptySnapshot)(nil)
+ _ MeterSnapshot = (*emptySnapshot)(nil)
+ _ EWMASnapshot = (*emptySnapshot)(nil)
+ _ TimerSnapshot = (*emptySnapshot)(nil)
+)
+
+type emptySnapshot struct{}
+
+func (*emptySnapshot) Count() int64 { return 0 }
+func (*emptySnapshot) Max() int64 { return 0 }
+func (*emptySnapshot) Mean() float64 { return 0.0 }
+func (*emptySnapshot) Min() int64 { return 0 }
+func (*emptySnapshot) Percentile(p float64) float64 { return 0.0 }
+func (*emptySnapshot) Percentiles(ps []float64) []float64 { return make([]float64, len(ps)) }
+func (*emptySnapshot) Size() int { return 0 }
+func (*emptySnapshot) StdDev() float64 { return 0.0 }
+func (*emptySnapshot) Sum() int64 { return 0 }
+func (*emptySnapshot) Values() []int64 { return []int64{} }
+func (*emptySnapshot) Variance() float64 { return 0.0 }
+func (*emptySnapshot) Value() int64 { return 0 }
+func (*emptySnapshot) Rate() float64 { return 0.0 }
+func (*emptySnapshot) Rate1() float64 { return 0.0 }
+func (*emptySnapshot) Rate5() float64 { return 0.0 }
+func (*emptySnapshot) Rate15() float64 { return 0.0 }
+func (*emptySnapshot) RateMean() float64 { return 0.0 }
diff --git ethereum/go-ethereum/metrics/influxdb/influxdb.go taikoxyz/taiko-geth/metrics/influxdb/influxdb.go
index 5dfbbab3edeb6b4ad2271e93c617e8ddbac41ce5..bbc4fc024b34fe6df61f64871a42efa2e7ffdab7 100644
--- ethereum/go-ethereum/metrics/influxdb/influxdb.go
+++ taikoxyz/taiko-geth/metrics/influxdb/influxdb.go
@@ -11,13 +11,13 @@ switch metric := i.(type) {
case metrics.Counter:
measurement := fmt.Sprintf("%s%s.count", namespace, name)
fields := map[string]interface{}{
- "value": metric.Count(),
+ "value": metric.Snapshot().Count(),
}
return measurement, fields
case metrics.CounterFloat64:
measurement := fmt.Sprintf("%s%s.count", namespace, name)
fields := map[string]interface{}{
- "value": metric.Count(),
+ "value": metric.Snapshot().Count(),
}
return measurement, fields
case metrics.Gauge:
@@ -30,6 +30,13 @@ case metrics.GaugeFloat64:
measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
fields := map[string]interface{}{
"value": metric.Snapshot().Value(),
+ }
+ return measurement, fields
+ case metrics.GaugeInfo:
+ ms := metric.Snapshot()
+ measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
+ fields := map[string]interface{}{
+ "value": ms.Value().String(),
}
return measurement, fields
case metrics.Histogram:
@@ -92,20 +99,19 @@ }
return measurement, fields
case metrics.ResettingTimer:
t := metric.Snapshot()
- if len(t.Values()) == 0 {
+ if t.Count() == 0 {
break
}
- ps := t.Percentiles([]float64{50, 95, 99})
- val := t.Values()
+ ps := t.Percentiles([]float64{0.50, 0.95, 0.99})
measurement := fmt.Sprintf("%s%s.span", namespace, name)
fields := map[string]interface{}{
- "count": len(val),
- "max": val[len(val)-1],
+ "count": t.Count(),
+ "max": t.Max(),
"mean": t.Mean(),
- "min": val[0],
- "p50": ps[0],
- "p95": ps[1],
- "p99": ps[2],
+ "min": t.Min(),
+ "p50": int(ps[0]),
+ "p95": int(ps[1]),
+ "p99": int(ps[2]),
}
return measurement, fields
}
diff --git ethereum/go-ethereum/metrics/influxdb/influxdb_test.go taikoxyz/taiko-geth/metrics/influxdb/influxdb_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c6f2eeac62771186603424165133a5dc884a8f8d
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/influxdb/influxdb_test.go
@@ -0,0 +1,114 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package influxdb
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/metrics/internal"
+ influxdb2 "github.com/influxdata/influxdb-client-go/v2"
+)
+
+func TestMain(m *testing.M) {
+ metrics.Enabled = true
+ os.Exit(m.Run())
+}
+
+func TestExampleV1(t *testing.T) {
+ r := internal.ExampleMetrics()
+ var have, want string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ haveB, _ := io.ReadAll(r.Body)
+ have = string(haveB)
+ r.Body.Close()
+ }))
+ defer ts.Close()
+ u, _ := url.Parse(ts.URL)
+ rep := &reporter{
+ reg: r,
+ url: *u,
+ namespace: "goth.",
+ }
+ if err := rep.makeClient(); err != nil {
+ t.Fatal(err)
+ }
+ if err := rep.send(978307200); err != nil {
+ t.Fatal(err)
+ }
+ if wantB, err := os.ReadFile("./testdata/influxdbv1.want"); err != nil {
+ t.Fatal(err)
+ } else {
+ want = string(wantB)
+ }
+ if have != want {
+ t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
+ t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
+ }
+}
+
+func TestExampleV2(t *testing.T) {
+ r := internal.ExampleMetrics()
+ var have, want string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ haveB, _ := io.ReadAll(r.Body)
+ have = string(haveB)
+ r.Body.Close()
+ }))
+ defer ts.Close()
+
+ rep := &v2Reporter{
+ reg: r,
+ endpoint: ts.URL,
+ namespace: "goth.",
+ }
+ rep.client = influxdb2.NewClient(rep.endpoint, rep.token)
+ defer rep.client.Close()
+ rep.write = rep.client.WriteAPI(rep.organization, rep.bucket)
+
+ rep.send(978307200)
+
+ if wantB, err := os.ReadFile("./testdata/influxdbv2.want"); err != nil {
+ t.Fatal(err)
+ } else {
+ want = string(wantB)
+ }
+ if have != want {
+ t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
+ t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
+ }
+}
+
+func findFirstDiffPos(a, b string) string {
+ yy := strings.Split(b, "\n")
+ for i, x := range strings.Split(a, "\n") {
+ if i >= len(yy) {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: <EOF>", i, x, i)
+ }
+ if y := yy[i]; x != y {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y)
+ }
+ }
+ return ""
+}
diff --git ethereum/go-ethereum/metrics/influxdb/influxdbv1.go taikoxyz/taiko-geth/metrics/influxdb/influxdbv1.go
index f65d30ef959097cab6530f46acc5694a0e4e86b0..ac58280803bb16eb425538fee57f073f829661a2 100644
--- ethereum/go-ethereum/metrics/influxdb/influxdbv1.go
+++ taikoxyz/taiko-geth/metrics/influxdb/influxdbv1.go
@@ -79,7 +79,7 @@ if err := rep.makeClient(); err != nil {
return fmt.Errorf("unable to make InfluxDB client. err: %v", err)
}
- if err := rep.send(); err != nil {
+ if err := rep.send(0); err != nil {
return fmt.Errorf("unable to send to InfluxDB. err: %v", err)
}
@@ -107,7 +107,7 @@
for {
select {
case <-intervalTicker.C:
- if err := r.send(); err != nil {
+ if err := r.send(0); err != nil {
log.Warn("Unable to send to InfluxDB", "err", err)
}
case <-pingTicker.C:
@@ -123,7 +123,9 @@ }
}
}
-func (r *reporter) send() error {
+// send sends the measurements. If provided tstamp is >0, it is used. Otherwise,
+// a 'fresh' timestamp is used.
+func (r *reporter) send(tstamp int64) error {
bps, err := client.NewBatchPoints(
client.BatchPointsConfig{
Database: r.database,
@@ -132,7 +134,12 @@ if err != nil {
return err
}
r.reg.Each(func(name string, i interface{}) {
- now := time.Now()
+ var now time.Time
+ if tstamp <= 0 {
+ now = time.Now()
+ } else {
+ now = time.Unix(tstamp, 0)
+ }
measurement, fields := readMeter(r.namespace, name, i)
if fields == nil {
return
diff --git ethereum/go-ethereum/metrics/influxdb/influxdbv2.go taikoxyz/taiko-geth/metrics/influxdb/influxdbv2.go
index 7984898f32594c5481a69032083aec1c94fabbea..0be5137d5ee14b0d3575e378138adb4b5811247e 100644
--- ethereum/go-ethereum/metrics/influxdb/influxdbv2.go
+++ taikoxyz/taiko-geth/metrics/influxdb/influxdbv2.go
@@ -64,7 +64,7 @@
for {
select {
case <-intervalTicker.C:
- r.send()
+ r.send(0)
case <-pingTicker.C:
_, err := r.client.Health(context.Background())
if err != nil {
@@ -74,9 +74,16 @@ }
}
}
-func (r *v2Reporter) send() {
+// send sends the measurements. If provided tstamp is >0, it is used. Otherwise,
+// a 'fresh' timestamp is used.
+func (r *v2Reporter) send(tstamp int64) {
r.reg.Each(func(name string, i interface{}) {
- now := time.Now()
+ var now time.Time
+ if tstamp <= 0 {
+ now = time.Now()
+ } else {
+ now = time.Unix(tstamp, 0)
+ }
measurement, fields := readMeter(r.namespace, name, i)
if fields == nil {
return
diff --git ethereum/go-ethereum/metrics/influxdb/testdata/influxdbv1.want taikoxyz/taiko-geth/metrics/influxdb/testdata/influxdbv1.want
new file mode 100644
index 0000000000000000000000000000000000000000..9443faedc5a2b3d3b5726670c7ecc126756fba2b
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/influxdb/testdata/influxdbv1.want
@@ -0,0 +1,11 @@
+goth.system/cpu/schedlatency.histogram count=5645i,max=41943040i,mean=1819544.0410983171,min=0i,p25=0,p50=0,p75=7168,p95=16777216,p99=29360128,p999=33554432,p9999=33554432,stddev=6393570.217198883,variance=40877740122252.57 978307200000000000
+goth.system/memory/pauses.histogram count=14i,max=229376i,mean=50066.28571428572,min=5120i,p25=10240,p50=32768,p75=57344,p95=196608,p99=196608,p999=196608,p9999=196608,stddev=54726.062410783874,variance=2994941906.9890113 978307200000000000
+goth.test/counter.count value=12345 978307200000000000
+goth.test/counter_float64.count value=54321.98 978307200000000000
+goth.test/gauge.gauge value=23456i 978307200000000000
+goth.test/gauge_float64.gauge value=34567.89 978307200000000000
+goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000
+goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000
+goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000
+goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000
+goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000
diff --git ethereum/go-ethereum/metrics/influxdb/testdata/influxdbv2.want taikoxyz/taiko-geth/metrics/influxdb/testdata/influxdbv2.want
new file mode 100644
index 0000000000000000000000000000000000000000..9443faedc5a2b3d3b5726670c7ecc126756fba2b
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/influxdb/testdata/influxdbv2.want
@@ -0,0 +1,11 @@
+goth.system/cpu/schedlatency.histogram count=5645i,max=41943040i,mean=1819544.0410983171,min=0i,p25=0,p50=0,p75=7168,p95=16777216,p99=29360128,p999=33554432,p9999=33554432,stddev=6393570.217198883,variance=40877740122252.57 978307200000000000
+goth.system/memory/pauses.histogram count=14i,max=229376i,mean=50066.28571428572,min=5120i,p25=10240,p50=32768,p75=57344,p95=196608,p99=196608,p999=196608,p9999=196608,stddev=54726.062410783874,variance=2994941906.9890113 978307200000000000
+goth.test/counter.count value=12345 978307200000000000
+goth.test/counter_float64.count value=54321.98 978307200000000000
+goth.test/gauge.gauge value=23456i 978307200000000000
+goth.test/gauge_float64.gauge value=34567.89 978307200000000000
+goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000
+goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000
+goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000
+goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000
+goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000
diff --git ethereum/go-ethereum/metrics/internal/sampledata.go taikoxyz/taiko-geth/metrics/internal/sampledata.go
new file mode 100644
index 0000000000000000000000000000000000000000..de9b207b6d4ad903069717fe8f5bd3d6270006a9
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/internal/sampledata.go
@@ -0,0 +1,95 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package internal
+
+import (
+ "bytes"
+ "encoding/gob"
+ metrics2 "runtime/metrics"
+ "time"
+
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+// ExampleMetrics returns an ordered registry populated with a sample of metrics.
+func ExampleMetrics() metrics.Registry {
+ var registry = metrics.NewOrderedRegistry()
+
+ metrics.NewRegisteredCounterFloat64("test/counter", registry).Inc(12345)
+ metrics.NewRegisteredCounterFloat64("test/counter_float64", registry).Inc(54321.98)
+ metrics.NewRegisteredGauge("test/gauge", registry).Update(23456)
+ metrics.NewRegisteredGaugeFloat64("test/gauge_float64", registry).Update(34567.89)
+ metrics.NewRegisteredGaugeInfo("test/gauge_info", registry).Update(
+ metrics.GaugeInfoValue{
+ "version": "1.10.18-unstable",
+ "arch": "amd64",
+ "os": "linux",
+ "commit": "7caa2d8163ae3132c1c2d6978c76610caee2d949",
+ "protocol_versions": "64 65 66",
+ })
+
+ {
+ s := metrics.NewUniformSample(3)
+ s.Update(1)
+ s.Update(2)
+ s.Update(3)
+ //metrics.NewRegisteredHistogram("test/histogram", registry, metrics.NewSampleSnapshot(3, []int64{1, 2, 3}))
+ metrics.NewRegisteredHistogram("test/histogram", registry, s)
+ }
+ registry.Register("test/meter", metrics.NewInactiveMeter())
+ {
+ timer := metrics.NewRegisteredResettingTimer("test/resetting_timer", registry)
+ timer.Update(10 * time.Millisecond)
+ timer.Update(11 * time.Millisecond)
+ timer.Update(12 * time.Millisecond)
+ timer.Update(120 * time.Millisecond)
+ timer.Update(13 * time.Millisecond)
+ timer.Update(14 * time.Millisecond)
+ }
+ {
+ timer := metrics.NewRegisteredTimer("test/timer", registry)
+ timer.Update(20 * time.Millisecond)
+ timer.Update(21 * time.Millisecond)
+ timer.Update(22 * time.Millisecond)
+ timer.Update(120 * time.Millisecond)
+ timer.Update(23 * time.Millisecond)
+ timer.Update(24 * time.Millisecond)
+ timer.Stop()
+ }
+ registry.Register("test/empty_resetting_timer", metrics.NewResettingTimer().Snapshot())
+
+ { // go runtime metrics
+ var sLatency = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06T\xff\x82\x01\xff\xa2\x00\xfe\r\xef\x00\x01\x02\x02\x04\x05\x04\b\x15\x17 B?6.L;$!2) \x1a? \x190aH7FY6#\x190\x1d\x14\x10\x1b\r\t\x04\x03\x01\x01\x00\x03\x02\x00\x03\x05\x05\x02\x02\x06\x04\v\x06\n\x15\x18\x13'&.\x12=H/L&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\
xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84
\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00"
+ var gcPauses = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06R\xff\x82\x01\xff\xa2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x00\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf
7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf
8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00"
+
+ var secondsToNs = float64(time.Second)
+
+ dserialize := func(data string) *metrics2.Float64Histogram {
+ var res metrics2.Float64Histogram
+ if err := gob.NewDecoder(bytes.NewReader([]byte(data))).Decode(&res); err != nil {
+ panic(err)
+ }
+ return &res
+ }
+ cpuSchedLatency := metrics.RuntimeHistogramFromData(secondsToNs, dserialize(sLatency))
+ registry.Register("system/cpu/schedlatency", cpuSchedLatency)
+
+ memPauses := metrics.RuntimeHistogramFromData(secondsToNs, dserialize(gcPauses))
+ registry.Register("system/memory/pauses", memPauses)
+ }
+ return registry
+}
diff --git ethereum/go-ethereum/metrics/internal/sampledata_test.go taikoxyz/taiko-geth/metrics/internal/sampledata_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..00132994064e36f4ca88f38bb4ad197b1facc37f
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/internal/sampledata_test.go
@@ -0,0 +1,27 @@
+package internal
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ metrics2 "runtime/metrics"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+func TestCollectRuntimeMetrics(t *testing.T) {
+ t.Skip("Only used for generating testdata")
+ serialize := func(path string, histogram *metrics2.Float64Histogram) {
+ var f = new(bytes.Buffer)
+ if err := gob.NewEncoder(f).Encode(histogram); err != nil {
+ panic(err)
+ }
+ fmt.Printf("var %v = %q\n", path, f.Bytes())
+ }
+ time.Sleep(2 * time.Second)
+ stats := metrics.ReadRuntimeStats()
+ serialize("schedlatency", stats.SchedLatency)
+ serialize("gcpauses", stats.GCPauses)
+}
diff --git ethereum/go-ethereum/metrics/librato/librato.go taikoxyz/taiko-geth/metrics/librato/librato.go
index 3d45f4c7be1be69cf9e8643590237dde01448a78..a86f758637869f15e7372ba1f684119c07586763 100644
--- ethereum/go-ethereum/metrics/librato/librato.go
+++ taikoxyz/taiko-geth/metrics/librato/librato.go
@@ -61,16 +61,16 @@ }
// calculate sum of squares from data provided by metrics.Histogram
// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
-func sumSquares(s metrics.Sample) float64 {
- count := float64(s.Count())
- sumSquared := math.Pow(count*s.Mean(), 2)
- sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
+func sumSquares(icount int64, mean, stDev float64) float64 {
+ count := float64(icount)
+ sumSquared := math.Pow(count*mean, 2)
+ sumSquares := math.Pow(count*stDev, 2) + sumSquared/count
if math.IsNaN(sumSquares) {
return 0.0
}
return sumSquares
}
-func sumSquaresTimer(t metrics.Timer) float64 {
+func sumSquaresTimer(t metrics.TimerSnapshot) float64 {
count := float64(t.Count())
sumSquared := math.Pow(count*t.Mean(), 2)
sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
@@ -97,9 +97,10 @@ measurement := Measurement{}
measurement[Period] = rep.Interval.Seconds()
switch m := metric.(type) {
case metrics.Counter:
- if m.Count() > 0 {
+ ms := m.Snapshot()
+ if ms.Count() > 0 {
measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
- measurement[Value] = float64(m.Count())
+ measurement[Value] = float64(ms.Count())
measurement[Attributes] = map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@@ -108,9 +109,9 @@ }
snapshot.Counters = append(snapshot.Counters, measurement)
}
case metrics.CounterFloat64:
- if m.Count() > 0 {
+ if count := m.Snapshot().Count(); count > 0 {
measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
- measurement[Value] = m.Count()
+ measurement[Value] = count
measurement[Attributes] = map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@@ -120,40 +121,45 @@ snapshot.Counters = append(snapshot.Counters, measurement)
}
case metrics.Gauge:
measurement[Name] = name
- measurement[Value] = float64(m.Value())
+ measurement[Value] = float64(m.Snapshot().Value())
snapshot.Gauges = append(snapshot.Gauges, measurement)
case metrics.GaugeFloat64:
measurement[Name] = name
- measurement[Value] = m.Value()
+ measurement[Value] = m.Snapshot().Value()
+ snapshot.Gauges = append(snapshot.Gauges, measurement)
+ case metrics.GaugeInfo:
+ measurement[Name] = name
+ measurement[Value] = m.Snapshot().Value()
snapshot.Gauges = append(snapshot.Gauges, measurement)
case metrics.Histogram:
- if m.Count() > 0 {
+ ms := m.Snapshot()
+ if ms.Count() > 0 {
gauges := make([]Measurement, histogramGaugeCount)
- s := m.Sample()
measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
- measurement[Count] = uint64(s.Count())
- measurement[Max] = float64(s.Max())
- measurement[Min] = float64(s.Min())
- measurement[Sum] = float64(s.Sum())
- measurement[SumSquares] = sumSquares(s)
+ measurement[Count] = uint64(ms.Count())
+ measurement[Max] = float64(ms.Max())
+ measurement[Min] = float64(ms.Min())
+ measurement[Sum] = float64(ms.Sum())
+ measurement[SumSquares] = sumSquares(ms.Count(), ms.Mean(), ms.StdDev())
gauges[0] = measurement
for i, p := range rep.Percentiles {
gauges[i+1] = Measurement{
Name: fmt.Sprintf("%s.%.2f", measurement[Name], p),
- Value: s.Percentile(p),
+ Value: ms.Percentile(p),
Period: measurement[Period],
}
}
snapshot.Gauges = append(snapshot.Gauges, gauges...)
}
case metrics.Meter:
+ ms := m.Snapshot()
measurement[Name] = name
- measurement[Value] = float64(m.Count())
+ measurement[Value] = float64(ms.Count())
snapshot.Counters = append(snapshot.Counters, measurement)
snapshot.Gauges = append(snapshot.Gauges,
Measurement{
Name: fmt.Sprintf("%s.%s", name, "1min"),
- Value: m.Rate1(),
+ Value: ms.Rate1(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@@ -163,7 +169,7 @@ },
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "5min"),
- Value: m.Rate5(),
+ Value: ms.Rate5(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@@ -173,7 +179,7 @@ },
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "15min"),
- Value: m.Rate15(),
+ Value: ms.Rate15(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@@ -183,26 +189,27 @@ },
},
)
case metrics.Timer:
+ ms := m.Snapshot()
measurement[Name] = name
- measurement[Value] = float64(m.Count())
+ measurement[Value] = float64(ms.Count())
snapshot.Counters = append(snapshot.Counters, measurement)
- if m.Count() > 0 {
+ if ms.Count() > 0 {
libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
gauges := make([]Measurement, histogramGaugeCount)
gauges[0] = Measurement{
Name: libratoName,
- Count: uint64(m.Count()),
- Sum: m.Mean() * float64(m.Count()),
- Max: float64(m.Max()),
- Min: float64(m.Min()),
- SumSquares: sumSquaresTimer(m),
+ Count: uint64(ms.Count()),
+ Sum: ms.Mean() * float64(ms.Count()),
+ Max: float64(ms.Max()),
+ Min: float64(ms.Min()),
+ SumSquares: sumSquaresTimer(ms),
Period: int64(rep.Interval.Seconds()),
Attributes: rep.TimerAttributes,
}
for i, p := range rep.Percentiles {
gauges[i+1] = Measurement{
Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100),
- Value: m.Percentile(p),
+ Value: ms.Percentile(p),
Period: int64(rep.Interval.Seconds()),
Attributes: rep.TimerAttributes,
}
@@ -211,7 +218,7 @@ snapshot.Gauges = append(snapshot.Gauges, gauges...)
snapshot.Gauges = append(snapshot.Gauges,
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.1min"),
- Value: m.Rate1(),
+ Value: ms.Rate1(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@@ -221,7 +228,7 @@ },
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.5min"),
- Value: m.Rate5(),
+ Value: ms.Rate5(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
@@ -231,7 +238,7 @@ },
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.15min"),
- Value: m.Rate15(),
+ Value: ms.Rate15(),
Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
diff --git ethereum/go-ethereum/metrics/log.go taikoxyz/taiko-geth/metrics/log.go
index d1ce627a8378ba164cf592aac40711269645efe0..3b9773faa7287d23fa47501f8f253677a48a4b86 100644
--- ethereum/go-ethereum/metrics/log.go
+++ taikoxyz/taiko-geth/metrics/log.go
@@ -23,16 +23,19 @@ r.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
l.Printf("counter %s\n", name)
- l.Printf(" count: %9d\n", metric.Count())
+ l.Printf(" count: %9d\n", metric.Snapshot().Count())
case CounterFloat64:
l.Printf("counter %s\n", name)
- l.Printf(" count: %f\n", metric.Count())
+ l.Printf(" count: %f\n", metric.Snapshot().Count())
case Gauge:
l.Printf("gauge %s\n", name)
- l.Printf(" value: %9d\n", metric.Value())
+ l.Printf(" value: %9d\n", metric.Snapshot().Value())
case GaugeFloat64:
l.Printf("gauge %s\n", name)
- l.Printf(" value: %f\n", metric.Value())
+ l.Printf(" value: %f\n", metric.Snapshot().Value())
+ case GaugeInfo:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %s\n", metric.Snapshot().Value())
case Healthcheck:
metric.Check()
l.Printf("healthcheck %s\n", name)
diff --git ethereum/go-ethereum/metrics/meter.go taikoxyz/taiko-geth/metrics/meter.go
index e8564d6a5e76f1615b8ea0decf65f1aa821efeb6..22475ef6ebee8863af3277534eaedf0a17cf126f 100644
--- ethereum/go-ethereum/metrics/meter.go
+++ taikoxyz/taiko-geth/metrics/meter.go
@@ -1,21 +1,25 @@
package metrics
import (
+ "math"
"sync"
"sync/atomic"
"time"
)
-// Meters count events to produce exponentially-weighted moving average rates
-// at one-, five-, and fifteen-minutes and a mean rate.
-type Meter interface {
+type MeterSnapshot interface {
Count() int64
- Mark(int64)
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
- Snapshot() Meter
+}
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+ Mark(int64)
+ Snapshot() MeterSnapshot
Stop()
}
@@ -30,17 +34,6 @@ }
return r.GetOrRegister(name, NewMeter).(Meter)
}
-// GetOrRegisterMeterForced returns an existing Meter or constructs and registers a
-// new StandardMeter no matter the global switch is enabled or not.
-// Be sure to unregister the meter from the registry once it is of no use to
-// allow for garbage collection.
-func GetOrRegisterMeterForced(name string, r Registry) Meter {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewMeterForced).(Meter)
-}
-
// NewMeter constructs a new StandardMeter and launches a goroutine.
// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
func NewMeter() Meter {
@@ -58,18 +51,13 @@ }
return m
}
-// NewMeterForced constructs a new StandardMeter and launches a goroutine no matter
-// the global switch is enabled or not.
-// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
-func NewMeterForced() Meter {
- m := newStandardMeter()
- arbiter.Lock()
- defer arbiter.Unlock()
- arbiter.meters[m] = struct{}{}
- if !arbiter.started {
- arbiter.started = true
- go arbiter.tick()
+// NewInactiveMeter returns a meter but does not start any goroutines. This
+// method is mainly intended for testing.
+func NewInactiveMeter() Meter {
+ if !Enabled {
+ return NilMeter{}
}
+ m := newStandardMeter()
return m
}
@@ -78,95 +66,48 @@ // and launches a goroutine.
// Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection.
func NewRegisteredMeter(name string, r Registry) Meter {
- c := NewMeter()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
+ return GetOrRegisterMeter(name, r)
}
-// NewRegisteredMeterForced constructs and registers a new StandardMeter
-// and launches a goroutine no matter the global switch is enabled or not.
-// Be sure to unregister the meter from the registry once it is of no use to
-// allow for garbage collection.
-func NewRegisteredMeterForced(name string, r Registry) Meter {
- c := NewMeterForced()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// MeterSnapshot is a read-only copy of another Meter.
-type MeterSnapshot struct {
- temp atomic.Int64
+// meterSnapshot is a read-only copy of the meter's internal values.
+type meterSnapshot struct {
count int64
rate1, rate5, rate15, rateMean float64
}
// Count returns the count of events at the time the snapshot was taken.
-func (m *MeterSnapshot) Count() int64 { return m.count }
-
-// Mark panics.
-func (*MeterSnapshot) Mark(n int64) {
- panic("Mark called on a MeterSnapshot")
-}
+func (m *meterSnapshot) Count() int64 { return m.count }
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
-func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+func (m *meterSnapshot) Rate1() float64 { return m.rate1 }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
-func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+func (m *meterSnapshot) Rate5() float64 { return m.rate5 }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
-func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+func (m *meterSnapshot) Rate15() float64 { return m.rate15 }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
-func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
-
-// Snapshot returns the snapshot.
-func (m *MeterSnapshot) Snapshot() Meter { return m }
-
-// Stop is a no-op.
-func (m *MeterSnapshot) Stop() {}
+func (m *meterSnapshot) RateMean() float64 { return m.rateMean }
// NilMeter is a no-op Meter.
type NilMeter struct{}
-// Count is a no-op.
-func (NilMeter) Count() int64 { return 0 }
-
-// Mark is a no-op.
-func (NilMeter) Mark(n int64) {}
-
-// Rate1 is a no-op.
-func (NilMeter) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilMeter) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilMeter) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilMeter) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilMeter) Snapshot() Meter { return NilMeter{} }
-
-// Stop is a no-op.
-func (NilMeter) Stop() {}
+func (NilMeter) Count() int64 { return 0 }
+func (NilMeter) Mark(n int64) {}
+func (NilMeter) Snapshot() MeterSnapshot { return (*emptySnapshot)(nil) }
+func (NilMeter) Stop() {}
// StandardMeter is the standard implementation of a Meter.
type StandardMeter struct {
- lock sync.RWMutex
- snapshot *MeterSnapshot
+ count atomic.Int64
+ uncounted atomic.Int64 // not yet added to the EWMAs
+ rateMean atomic.Uint64
+
a1, a5, a15 EWMA
startTime time.Time
stopped atomic.Bool
@@ -174,7 +115,6 @@ }
func newStandardMeter() *StandardMeter {
return &StandardMeter{
- snapshot: &MeterSnapshot{},
a1: NewEWMA1(),
a5: NewEWMA5(),
a15: NewEWMA15(),
@@ -184,97 +124,42 @@ }
// Stop stops the meter, Mark() will be a no-op if you use it after being stopped.
func (m *StandardMeter) Stop() {
- stopped := m.stopped.Swap(true)
- if !stopped {
+ if stopped := m.stopped.Swap(true); !stopped {
arbiter.Lock()
delete(arbiter.meters, m)
arbiter.Unlock()
}
}
-// Count returns the number of events recorded.
-// It updates the meter to be as accurate as possible
-func (m *StandardMeter) Count() int64 {
- m.lock.Lock()
- defer m.lock.Unlock()
- m.updateMeter()
- return m.snapshot.count
-}
-
// Mark records the occurrence of n events.
func (m *StandardMeter) Mark(n int64) {
- m.snapshot.temp.Add(n)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (m *StandardMeter) Rate1() float64 {
- m.lock.RLock()
- defer m.lock.RUnlock()
- return m.snapshot.rate1
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (m *StandardMeter) Rate5() float64 {
- m.lock.RLock()
- defer m.lock.RUnlock()
- return m.snapshot.rate5
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (m *StandardMeter) Rate15() float64 {
- m.lock.RLock()
- defer m.lock.RUnlock()
- return m.snapshot.rate15
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (m *StandardMeter) RateMean() float64 {
- m.lock.RLock()
- defer m.lock.RUnlock()
- return m.snapshot.rateMean
+ m.uncounted.Add(n)
}
// Snapshot returns a read-only copy of the meter.
-func (m *StandardMeter) Snapshot() Meter {
- m.lock.RLock()
- snapshot := MeterSnapshot{
- count: m.snapshot.count,
- rate1: m.snapshot.rate1,
- rate5: m.snapshot.rate5,
- rate15: m.snapshot.rate15,
- rateMean: m.snapshot.rateMean,
+func (m *StandardMeter) Snapshot() MeterSnapshot {
+ return &meterSnapshot{
+ count: m.count.Load() + m.uncounted.Load(),
+ rate1: m.a1.Snapshot().Rate(),
+ rate5: m.a5.Snapshot().Rate(),
+ rate15: m.a15.Snapshot().Rate(),
+ rateMean: math.Float64frombits(m.rateMean.Load()),
}
- snapshot.temp.Store(m.snapshot.temp.Load())
- m.lock.RUnlock()
- return &snapshot
}
-func (m *StandardMeter) updateSnapshot() {
- // should run with write lock held on m.lock
- snapshot := m.snapshot
- snapshot.rate1 = m.a1.Rate()
- snapshot.rate5 = m.a5.Rate()
- snapshot.rate15 = m.a15.Rate()
- snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
-}
-
-func (m *StandardMeter) updateMeter() {
- // should only run with write lock held on m.lock
- n := m.snapshot.temp.Swap(0)
- m.snapshot.count += n
+func (m *StandardMeter) tick() {
+ // Take the uncounted values, add to count
+ n := m.uncounted.Swap(0)
+ count := m.count.Add(n)
+ m.rateMean.Store(math.Float64bits(float64(count) / time.Since(m.startTime).Seconds()))
+ // Update the EWMA's internal state
m.a1.Update(n)
m.a5.Update(n)
m.a15.Update(n)
-}
-
-func (m *StandardMeter) tick() {
- m.lock.Lock()
- defer m.lock.Unlock()
- m.updateMeter()
+ // And trigger them to calculate the rates
m.a1.Tick()
m.a5.Tick()
m.a15.Tick()
- m.updateSnapshot()
}
// meterArbiter ticks meters every 5s from a single goroutine.
diff --git ethereum/go-ethereum/metrics/meter_test.go taikoxyz/taiko-geth/metrics/meter_test.go
index b3f6cb8c0c97e07752e7574d977f421859256db5..019c4d765b526abd56dbf7a87b83f5a7a84d346e 100644
--- ethereum/go-ethereum/metrics/meter_test.go
+++ taikoxyz/taiko-geth/metrics/meter_test.go
@@ -12,11 +12,17 @@ for i := 0; i < b.N; i++ {
m.Mark(1)
}
}
-
+func TestMeter(t *testing.T) {
+ m := NewMeter()
+ m.Mark(47)
+ if v := m.Snapshot().Count(); v != 47 {
+ t.Fatalf("have %d want %d", v, 47)
+ }
+}
func TestGetOrRegisterMeter(t *testing.T) {
r := NewRegistry()
NewRegisteredMeter("foo", r).Mark(47)
- if m := GetOrRegisterMeter("foo", r); m.Count() != 47 {
+ if m := GetOrRegisterMeter("foo", r).Snapshot(); m.Count() != 47 {
t.Fatal(m.Count())
}
}
@@ -31,10 +37,10 @@ m := newStandardMeter()
ma.meters[m] = struct{}{}
m.Mark(1)
ma.tickMeters()
- rateMean := m.RateMean()
+ rateMean := m.Snapshot().RateMean()
time.Sleep(100 * time.Millisecond)
ma.tickMeters()
- if m.RateMean() >= rateMean {
+ if m.Snapshot().RateMean() >= rateMean {
t.Error("m.RateMean() didn't decrease")
}
}
@@ -42,7 +48,7 @@
func TestMeterNonzero(t *testing.T) {
m := NewMeter()
m.Mark(3)
- if count := m.Count(); count != 3 {
+ if count := m.Snapshot().Count(); count != 3 {
t.Errorf("m.Count(): 3 != %v\n", count)
}
}
@@ -59,16 +65,8 @@ t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters))
}
}
-func TestMeterSnapshot(t *testing.T) {
- m := NewMeter()
- m.Mark(1)
- if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
- t.Fatal(snapshot)
- }
-}
-
func TestMeterZero(t *testing.T) {
- m := NewMeter()
+ m := NewMeter().Snapshot()
if count := m.Count(); count != 0 {
t.Errorf("m.Count(): 0 != %v\n", count)
}
@@ -79,13 +77,13 @@ m := NewMeter()
for i := 0; i < 101; i++ {
m.Mark(int64(i))
}
- if count := m.Count(); count != 5050 {
+ if count := m.Snapshot().Count(); count != 5050 {
t.Errorf("m.Count(): 5050 != %v\n", count)
}
for i := 0; i < 101; i++ {
m.Mark(int64(i))
}
- if count := m.Count(); count != 10100 {
+ if count := m.Snapshot().Count(); count != 10100 {
t.Errorf("m.Count(): 10100 != %v\n", count)
}
}
diff --git ethereum/go-ethereum/metrics/metrics.go taikoxyz/taiko-geth/metrics/metrics.go
index c206f1692407db306c75995aa37ffa9a0ba55a95..9ca8f115c0f789cfa2b7ab52069b7e0e4cb08d3a 100644
--- ethereum/go-ethereum/metrics/metrics.go
+++ taikoxyz/taiko-geth/metrics/metrics.go
@@ -9,7 +9,9 @@ import (
"os"
"runtime/metrics"
"runtime/pprof"
+ "strconv"
"strings"
+ "syscall"
"time"
"github.com/ethereum/go-ethereum/log"
@@ -30,13 +32,35 @@
// enablerFlags is the CLI flag names to use to enable metrics collections.
var enablerFlags = []string{"metrics"}
+// enablerEnvVars is the env var names to use to enable metrics collections.
+var enablerEnvVars = []string{"GETH_METRICS"}
+
// expensiveEnablerFlags is the CLI flag names to use to enable metrics collections.
var expensiveEnablerFlags = []string{"metrics.expensive"}
+
+// expensiveEnablerEnvVars is the env var names to use to enable metrics collections.
+var expensiveEnablerEnvVars = []string{"GETH_METRICS_EXPENSIVE"}
// Init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
// and peek into the command line args for the metrics flag.
func init() {
+ for _, enabler := range enablerEnvVars {
+ if val, found := syscall.Getenv(enabler); found && !Enabled {
+ if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later
+ log.Info("Enabling metrics collection")
+ Enabled = true
+ }
+ }
+ }
+ for _, enabler := range expensiveEnablerEnvVars {
+ if val, found := syscall.Getenv(enabler); found && !EnabledExpensive {
+ if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later
+ log.Info("Enabling expensive metrics collection")
+ EnabledExpensive = true
+ }
+ }
+ }
for _, arg := range os.Args {
flag := strings.TrimLeft(arg, "-")
@@ -83,6 +107,12 @@ {Name: "/memory/classes/heap/released:bytes"},
{Name: "/memory/classes/heap/unused:bytes"},
{Name: "/sched/goroutines:goroutines"},
{Name: "/sched/latencies:seconds"}, // histogram
+}
+
+func ReadRuntimeStats() *runtimeStats {
+ r := new(runtimeStats)
+ readRuntimeStats(r)
+ return r
}
func readRuntimeStats(v *runtimeStats) {
diff --git ethereum/go-ethereum/metrics/metrics_test.go taikoxyz/taiko-geth/metrics/metrics_test.go
index 534c44139b360031e1b228b2025cb351f90535db..2861d5f2caf61e051277d20e87342e704b0f461c 100644
--- ethereum/go-ethereum/metrics/metrics_test.go
+++ taikoxyz/taiko-geth/metrics/metrics_test.go
@@ -98,8 +98,8 @@ t := GetOrRegisterTimer("db.get.latency", nil)
t.Time(func() { time.Sleep(10 * time.Millisecond) })
t.Update(1)
- fmt.Println(c.Count())
- fmt.Println(t.Min())
+ fmt.Println(c.Snapshot().Count())
+ fmt.Println(t.Snapshot().Min())
// Output: 17
// 1
}
diff --git ethereum/go-ethereum/metrics/opentsdb.go taikoxyz/taiko-geth/metrics/opentsdb.go
index c9fd2e75d5e53600bdc5c4437144a7b5c4381747..e81690f94340eedf69afb19be393b8225f364092 100644
--- ethereum/go-ethereum/metrics/opentsdb.go
+++ taikoxyz/taiko-geth/metrics/opentsdb.go
@@ -3,6 +3,7 @@
import (
"bufio"
"fmt"
+ "io"
"log"
"net"
"os"
@@ -57,26 +58,22 @@ }
return shortHostName
}
-func openTSDB(c *OpenTSDBConfig) error {
- shortHostname := getShortHostname()
- now := time.Now().Unix()
+// writeRegistry writes the registry-metrics on the opentsb format.
+func (c *OpenTSDBConfig) writeRegistry(w io.Writer, now int64, shortHostname string) {
du := float64(c.DurationUnit)
- conn, err := net.DialTCP("tcp", nil, c.Addr)
- if nil != err {
- return err
- }
- defer conn.Close()
- w := bufio.NewWriter(conn)
+
c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname)
case CounterFloat64:
- fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname)
case Gauge:
- fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname)
case GaugeFloat64:
- fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname)
+ case GaugeInfo:
+ fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Snapshot().Value().String(), shortHostname)
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
@@ -115,7 +112,17 @@ fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
}
- w.Flush()
})
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.writeRegistry(w, time.Now().Unix(), getShortHostname())
+ w.Flush()
return nil
}
diff --git ethereum/go-ethereum/metrics/opentsdb_test.go taikoxyz/taiko-geth/metrics/opentsdb_test.go
index c43728960ed5cd3d7f5292c725c214ca7a8e3866..4548309f9c23d08c2a9b832ca71b091d6707e718 100644
--- ethereum/go-ethereum/metrics/opentsdb_test.go
+++ taikoxyz/taiko-geth/metrics/opentsdb_test.go
@@ -1,7 +1,11 @@
package metrics
import (
+ "fmt"
"net"
+ "os"
+ "strings"
+ "testing"
"time"
)
@@ -19,3 +23,44 @@ FlushInterval: 1 * time.Second,
DurationUnit: time.Millisecond,
})
}
+
+func TestExampleOpenTSB(t *testing.T) {
+ r := NewOrderedRegistry()
+ NewRegisteredGaugeInfo("foo", r).Update(GaugeInfoValue{"chain_id": "5"})
+ NewRegisteredGaugeFloat64("pi", r).Update(3.14)
+ NewRegisteredCounter("months", r).Inc(12)
+ NewRegisteredCounterFloat64("tau", r).Inc(1.57)
+ NewRegisteredMeter("elite", r).Mark(1337)
+ NewRegisteredTimer("second", r).Update(time.Second)
+ NewRegisteredCounterFloat64("tau", r).Inc(1.57)
+ NewRegisteredCounterFloat64("tau", r).Inc(1.57)
+
+ w := new(strings.Builder)
+ (&OpenTSDBConfig{
+ Registry: r,
+ DurationUnit: time.Millisecond,
+ Prefix: "pre",
+ }).writeRegistry(w, 978307200, "hal9000")
+
+ wantB, err := os.ReadFile("./testdata/opentsb.want")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if have, want := w.String(), string(wantB); have != want {
+ t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
+ t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
+ }
+}
+
+func findFirstDiffPos(a, b string) string {
+ yy := strings.Split(b, "\n")
+ for i, x := range strings.Split(a, "\n") {
+ if i >= len(yy) {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: <EOF>", i, x, i)
+ }
+ if y := yy[i]; x != y {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y)
+ }
+ }
+ return ""
+}
diff --git ethereum/go-ethereum/metrics/prometheus/collector.go taikoxyz/taiko-geth/metrics/prometheus/collector.go
index 2bd9bf22ccae03fdb122401c9d3ffd1ad20419ce..25b258d56ab125d22f9a884720fd58afc1ee0a83 100644
--- ethereum/go-ethereum/metrics/prometheus/collector.go
+++ taikoxyz/taiko-geth/metrics/prometheus/collector.go
@@ -19,6 +19,7 @@
import (
"bytes"
"fmt"
+ "sort"
"strconv"
"strings"
@@ -46,23 +47,55 @@ buff: &bytes.Buffer{},
}
}
-func (c *collector) addCounter(name string, m metrics.Counter) {
+// Add adds the metric i to the collector. This method returns an error if the
+// metric type is not supported/known.
+func (c *collector) Add(name string, i any) error {
+ switch m := i.(type) {
+ case metrics.Counter:
+ c.addCounter(name, m.Snapshot())
+ case metrics.CounterFloat64:
+ c.addCounterFloat64(name, m.Snapshot())
+ case metrics.Gauge:
+ c.addGauge(name, m.Snapshot())
+ case metrics.GaugeFloat64:
+ c.addGaugeFloat64(name, m.Snapshot())
+ case metrics.GaugeInfo:
+ c.addGaugeInfo(name, m.Snapshot())
+ case metrics.Histogram:
+ c.addHistogram(name, m.Snapshot())
+ case metrics.Meter:
+ c.addMeter(name, m.Snapshot())
+ case metrics.Timer:
+ c.addTimer(name, m.Snapshot())
+ case metrics.ResettingTimer:
+ c.addResettingTimer(name, m.Snapshot())
+ default:
+ return fmt.Errorf("unknown prometheus metric type %T", i)
+ }
+ return nil
+}
+
+func (c *collector) addCounter(name string, m metrics.CounterSnapshot) {
c.writeGaugeCounter(name, m.Count())
}
-func (c *collector) addCounterFloat64(name string, m metrics.CounterFloat64) {
+func (c *collector) addCounterFloat64(name string, m metrics.CounterFloat64Snapshot) {
c.writeGaugeCounter(name, m.Count())
}
-func (c *collector) addGauge(name string, m metrics.Gauge) {
+func (c *collector) addGauge(name string, m metrics.GaugeSnapshot) {
c.writeGaugeCounter(name, m.Value())
}
-func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64) {
+func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64Snapshot) {
c.writeGaugeCounter(name, m.Value())
}
-func (c *collector) addHistogram(name string, m metrics.Histogram) {
+func (c *collector) addGaugeInfo(name string, m metrics.GaugeInfoSnapshot) {
+ c.writeGaugeInfo(name, m.Value())
+}
+
+func (c *collector) addHistogram(name string, m metrics.HistogramSnapshot) {
pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
ps := m.Percentiles(pv)
c.writeSummaryCounter(name, m.Count())
@@ -73,11 +106,11 @@ }
c.buff.WriteRune('\n')
}
-func (c *collector) addMeter(name string, m metrics.Meter) {
+func (c *collector) addMeter(name string, m metrics.MeterSnapshot) {
c.writeGaugeCounter(name, m.Count())
}
-func (c *collector) addTimer(name string, m metrics.Timer) {
+func (c *collector) addTimer(name string, m metrics.TimerSnapshot) {
pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
ps := m.Percentiles(pv)
c.writeSummaryCounter(name, m.Count())
@@ -88,18 +121,30 @@ }
c.buff.WriteRune('\n')
}
-func (c *collector) addResettingTimer(name string, m metrics.ResettingTimer) {
- if len(m.Values()) <= 0 {
+func (c *collector) addResettingTimer(name string, m metrics.ResettingTimerSnapshot) {
+ if m.Count() <= 0 {
return
}
- ps := m.Percentiles([]float64{50, 95, 99})
- val := m.Values()
- c.writeSummaryCounter(name, len(val))
+ ps := m.Percentiles([]float64{0.50, 0.95, 0.99})
+ c.writeSummaryCounter(name, m.Count())
c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
c.writeSummaryPercentile(name, "0.50", ps[0])
c.writeSummaryPercentile(name, "0.95", ps[1])
c.writeSummaryPercentile(name, "0.99", ps[2])
c.buff.WriteRune('\n')
+}
+
+func (c *collector) writeGaugeInfo(name string, value metrics.GaugeInfoValue) {
+ name = mutateKey(name)
+ c.buff.WriteString(fmt.Sprintf(typeGaugeTpl, name))
+ c.buff.WriteString(name)
+ c.buff.WriteString(" ")
+ var kvs []string
+ for k, v := range value {
+ kvs = append(kvs, fmt.Sprintf("%v=%q", k, v))
+ }
+ sort.Strings(kvs)
+ c.buff.WriteString(fmt.Sprintf("{%v} 1\n\n", strings.Join(kvs, ", ")))
}
func (c *collector) writeGaugeCounter(name string, value interface{}) {
diff --git ethereum/go-ethereum/metrics/prometheus/collector_test.go taikoxyz/taiko-geth/metrics/prometheus/collector_test.go
index ff87c8e765e16a6b3e1ce55d98a86215ffa7e570..ea17aac4585fd8688f9d2734435265ab7dc20ece 100644
--- ethereum/go-ethereum/metrics/prometheus/collector_test.go
+++ taikoxyz/taiko-geth/metrics/prometheus/collector_test.go
@@ -1,11 +1,29 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package prometheus
import (
+ "fmt"
"os"
+ "strings"
"testing"
- "time"
"github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/metrics/internal"
)
func TestMain(m *testing.M) {
@@ -14,104 +32,34 @@ os.Exit(m.Run())
}
func TestCollector(t *testing.T) {
- c := newCollector()
-
- counter := metrics.NewCounter()
- counter.Inc(12345)
- c.addCounter("test/counter", counter)
-
- counterfloat64 := metrics.NewCounterFloat64()
- counterfloat64.Inc(54321.98)
- c.addCounterFloat64("test/counter_float64", counterfloat64)
-
- gauge := metrics.NewGauge()
- gauge.Update(23456)
- c.addGauge("test/gauge", gauge)
-
- gaugeFloat64 := metrics.NewGaugeFloat64()
- gaugeFloat64.Update(34567.89)
- c.addGaugeFloat64("test/gauge_float64", gaugeFloat64)
-
- histogram := metrics.NewHistogram(&metrics.NilSample{})
- c.addHistogram("test/histogram", histogram)
-
- meter := metrics.NewMeter()
- defer meter.Stop()
- meter.Mark(9999999)
- c.addMeter("test/meter", meter)
-
- timer := metrics.NewTimer()
- defer timer.Stop()
- timer.Update(20 * time.Millisecond)
- timer.Update(21 * time.Millisecond)
- timer.Update(22 * time.Millisecond)
- timer.Update(120 * time.Millisecond)
- timer.Update(23 * time.Millisecond)
- timer.Update(24 * time.Millisecond)
- c.addTimer("test/timer", timer)
-
- resettingTimer := metrics.NewResettingTimer()
- resettingTimer.Update(10 * time.Millisecond)
- resettingTimer.Update(11 * time.Millisecond)
- resettingTimer.Update(12 * time.Millisecond)
- resettingTimer.Update(120 * time.Millisecond)
- resettingTimer.Update(13 * time.Millisecond)
- resettingTimer.Update(14 * time.Millisecond)
- c.addResettingTimer("test/resetting_timer", resettingTimer.Snapshot())
-
- emptyResettingTimer := metrics.NewResettingTimer().Snapshot()
- c.addResettingTimer("test/empty_resetting_timer", emptyResettingTimer)
-
- const expectedOutput = `# TYPE test_counter gauge
-test_counter 12345
-
-# TYPE test_counter_float64 gauge
-test_counter_float64 54321.98
+ var (
+ c = newCollector()
+ want string
+ )
+ internal.ExampleMetrics().Each(func(name string, i interface{}) {
+ c.Add(name, i)
+ })
+ if wantB, err := os.ReadFile("./testdata/prometheus.want"); err != nil {
+ t.Fatal(err)
+ } else {
+ want = string(wantB)
+ }
+ if have := c.buff.String(); have != want {
+ t.Logf("have\n%v", have)
+ t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
+ t.Fatalf("unexpected collector output")
+ }
+}
-# TYPE test_gauge gauge
-test_gauge 23456
-
-# TYPE test_gauge_float64 gauge
-test_gauge_float64 34567.89
-
-# TYPE test_histogram_count counter
-test_histogram_count 0
-
-# TYPE test_histogram summary
-test_histogram {quantile="0.5"} 0
-test_histogram {quantile="0.75"} 0
-test_histogram {quantile="0.95"} 0
-test_histogram {quantile="0.99"} 0
-test_histogram {quantile="0.999"} 0
-test_histogram {quantile="0.9999"} 0
-
-# TYPE test_meter gauge
-test_meter 9999999
-
-# TYPE test_timer_count counter
-test_timer_count 6
-
-# TYPE test_timer summary
-test_timer {quantile="0.5"} 2.25e+07
-test_timer {quantile="0.75"} 4.8e+07
-test_timer {quantile="0.95"} 1.2e+08
-test_timer {quantile="0.99"} 1.2e+08
-test_timer {quantile="0.999"} 1.2e+08
-test_timer {quantile="0.9999"} 1.2e+08
-
-# TYPE test_resetting_timer_count counter
-test_resetting_timer_count 6
-
-# TYPE test_resetting_timer summary
-test_resetting_timer {quantile="0.50"} 12000000
-test_resetting_timer {quantile="0.95"} 120000000
-test_resetting_timer {quantile="0.99"} 120000000
-
-`
- exp := c.buff.String()
- if exp != expectedOutput {
- t.Log("Expected Output:\n", expectedOutput)
- t.Log("Actual Output:\n", exp)
- t.Fatal("unexpected collector output")
+func findFirstDiffPos(a, b string) string {
+ yy := strings.Split(b, "\n")
+ for i, x := range strings.Split(a, "\n") {
+ if i >= len(yy) {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: <EOF>", i, x, i)
+ }
+ if y := yy[i]; x != y {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y)
+ }
}
+ return ""
}
diff --git ethereum/go-ethereum/metrics/prometheus/prometheus.go taikoxyz/taiko-geth/metrics/prometheus/prometheus.go
index d966fa9a86662c3c1480fc81a5a015a36d1f7b54..dbdeae6c7f7d3b1450e28c816432f9eca07e3953 100644
--- ethereum/go-ethereum/metrics/prometheus/prometheus.go
+++ taikoxyz/taiko-geth/metrics/prometheus/prometheus.go
@@ -41,25 +41,7 @@ c := newCollector()
for _, name := range names {
i := reg.Get(name)
-
- switch m := i.(type) {
- case metrics.Counter:
- c.addCounter(name, m.Snapshot())
- case metrics.CounterFloat64:
- c.addCounterFloat64(name, m.Snapshot())
- case metrics.Gauge:
- c.addGauge(name, m.Snapshot())
- case metrics.GaugeFloat64:
- c.addGaugeFloat64(name, m.Snapshot())
- case metrics.Histogram:
- c.addHistogram(name, m.Snapshot())
- case metrics.Meter:
- c.addMeter(name, m.Snapshot())
- case metrics.Timer:
- c.addTimer(name, m.Snapshot())
- case metrics.ResettingTimer:
- c.addResettingTimer(name, m.Snapshot())
- default:
+ if err := c.Add(name, i); err != nil {
log.Warn("Unknown Prometheus metric type", "type", fmt.Sprintf("%T", i))
}
}
diff --git ethereum/go-ethereum/metrics/prometheus/testdata/prometheus.want taikoxyz/taiko-geth/metrics/prometheus/testdata/prometheus.want
new file mode 100644
index 0000000000000000000000000000000000000000..861c5f5cf0878883455a6ed449dd8de81cc1ff17
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/prometheus/testdata/prometheus.want
@@ -0,0 +1,70 @@
+# TYPE system_cpu_schedlatency_count counter
+system_cpu_schedlatency_count 5645
+
+# TYPE system_cpu_schedlatency summary
+system_cpu_schedlatency {quantile="0.5"} 0
+system_cpu_schedlatency {quantile="0.75"} 7168
+system_cpu_schedlatency {quantile="0.95"} 1.6777216e+07
+system_cpu_schedlatency {quantile="0.99"} 2.9360128e+07
+system_cpu_schedlatency {quantile="0.999"} 3.3554432e+07
+system_cpu_schedlatency {quantile="0.9999"} 3.3554432e+07
+
+# TYPE system_memory_pauses_count counter
+system_memory_pauses_count 14
+
+# TYPE system_memory_pauses summary
+system_memory_pauses {quantile="0.5"} 32768
+system_memory_pauses {quantile="0.75"} 57344
+system_memory_pauses {quantile="0.95"} 196608
+system_memory_pauses {quantile="0.99"} 196608
+system_memory_pauses {quantile="0.999"} 196608
+system_memory_pauses {quantile="0.9999"} 196608
+
+# TYPE test_counter gauge
+test_counter 12345
+
+# TYPE test_counter_float64 gauge
+test_counter_float64 54321.98
+
+# TYPE test_gauge gauge
+test_gauge 23456
+
+# TYPE test_gauge_float64 gauge
+test_gauge_float64 34567.89
+
+# TYPE test_gauge_info gauge
+test_gauge_info {arch="amd64", commit="7caa2d8163ae3132c1c2d6978c76610caee2d949", os="linux", protocol_versions="64 65 66", version="1.10.18-unstable"} 1
+
+# TYPE test_histogram_count counter
+test_histogram_count 3
+
+# TYPE test_histogram summary
+test_histogram {quantile="0.5"} 2
+test_histogram {quantile="0.75"} 3
+test_histogram {quantile="0.95"} 3
+test_histogram {quantile="0.99"} 3
+test_histogram {quantile="0.999"} 3
+test_histogram {quantile="0.9999"} 3
+
+# TYPE test_meter gauge
+test_meter 0
+
+# TYPE test_resetting_timer_count counter
+test_resetting_timer_count 6
+
+# TYPE test_resetting_timer summary
+test_resetting_timer {quantile="0.50"} 1.25e+07
+test_resetting_timer {quantile="0.95"} 1.2e+08
+test_resetting_timer {quantile="0.99"} 1.2e+08
+
+# TYPE test_timer_count counter
+test_timer_count 6
+
+# TYPE test_timer summary
+test_timer {quantile="0.5"} 2.25e+07
+test_timer {quantile="0.75"} 4.8e+07
+test_timer {quantile="0.95"} 1.2e+08
+test_timer {quantile="0.99"} 1.2e+08
+test_timer {quantile="0.999"} 1.2e+08
+test_timer {quantile="0.9999"} 1.2e+08
+
diff --git ethereum/go-ethereum/metrics/registry.go taikoxyz/taiko-geth/metrics/registry.go
index ec6e37c54ffe5beb43f5d960f65a0f93ed5a0d40..8bfbc080420f0b66b80364bd45f2f900caede4e7 100644
--- ethereum/go-ethereum/metrics/registry.go
+++ taikoxyz/taiko-geth/metrics/registry.go
@@ -3,6 +3,7 @@
import (
"fmt"
"reflect"
+ "sort"
"strings"
"sync"
)
@@ -47,15 +48,37 @@ // Unregister the metric with the given name.
Unregister(string)
}
+type orderedRegistry struct {
+ StandardRegistry
+}
+
+// Call the given function for each registered metric.
+func (r *orderedRegistry) Each(f func(string, interface{})) {
+ var names []string
+ reg := r.registered()
+ for name := range reg {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ f(name, reg[name])
+ }
+}
+
+// NewRegistry creates a new registry.
+func NewRegistry() Registry {
+ return new(StandardRegistry)
+}
+
+// NewOrderedRegistry creates a new ordered registry (for testing).
+func NewOrderedRegistry() Registry {
+ return new(orderedRegistry)
+}
+
// The standard implementation of a Registry uses sync.map
// of names to metrics.
type StandardRegistry struct {
metrics sync.Map
-}
-
-// Create a new registry.
-func NewRegistry() Registry {
- return &StandardRegistry{}
}
// Call the given function for each registered metric.
@@ -127,13 +150,13 @@ r.Each(func(name string, i interface{}) {
values := make(map[string]interface{})
switch metric := i.(type) {
case Counter:
- values["count"] = metric.Count()
+ values["count"] = metric.Snapshot().Count()
case CounterFloat64:
- values["count"] = metric.Count()
+ values["count"] = metric.Snapshot().Count()
case Gauge:
- values["value"] = metric.Value()
+ values["value"] = metric.Snapshot().Value()
case GaugeFloat64:
- values["value"] = metric.Value()
+ values["value"] = metric.Snapshot().Value()
case Healthcheck:
values["error"] = nil
metric.Check()
@@ -191,7 +214,7 @@ }
func (r *StandardRegistry) loadOrRegister(name string, i interface{}) (interface{}, bool, bool) {
switch i.(type) {
- case Counter, CounterFloat64, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer:
+ case Counter, CounterFloat64, Gauge, GaugeFloat64, GaugeInfo, Healthcheck, Histogram, Meter, Timer, ResettingTimer:
default:
return nil, false, false
}
diff --git ethereum/go-ethereum/metrics/registry_test.go taikoxyz/taiko-geth/metrics/registry_test.go
index 7cc5cf14fe557d77834098a24f195c9bd0fc91dc..75012dd4ac00a18b41feb424697e82856536a6d8 100644
--- ethereum/go-ethereum/metrics/registry_test.go
+++ taikoxyz/taiko-geth/metrics/registry_test.go
@@ -85,11 +85,11 @@
func TestRegistryGet(t *testing.T) {
r := NewRegistry()
r.Register("foo", NewCounter())
- if count := r.Get("foo").(Counter).Count(); count != 0 {
+ if count := r.Get("foo").(Counter).Snapshot().Count(); count != 0 {
t.Fatal(count)
}
r.Get("foo").(Counter).Inc(1)
- if count := r.Get("foo").(Counter).Count(); count != 1 {
+ if count := r.Get("foo").(Counter).Snapshot().Count(); count != 1 {
t.Fatal(count)
}
}
diff --git ethereum/go-ethereum/metrics/resetting_sample.go taikoxyz/taiko-geth/metrics/resetting_sample.go
index 43c1129cd0bc921c1ea10ed69ee2f63591f6ac01..c38ffcd3ec32af5c110c68015569d1dc2a2f328f 100644
--- ethereum/go-ethereum/metrics/resetting_sample.go
+++ taikoxyz/taiko-geth/metrics/resetting_sample.go
@@ -17,7 +17,7 @@ Sample
}
// Snapshot returns a read-only copy of the sample with the original reset.
-func (rs *resettingSample) Snapshot() Sample {
+func (rs *resettingSample) Snapshot() SampleSnapshot {
s := rs.Sample.Snapshot()
rs.Sample.Clear()
return s
diff --git ethereum/go-ethereum/metrics/resetting_timer.go taikoxyz/taiko-geth/metrics/resetting_timer.go
index 8e23c8eeeaaa953906cad6cdebe8b3e41fee9b8b..6802e3fcea980bf4a17a0880aff8348a27c271f7 100644
--- ethereum/go-ethereum/metrics/resetting_timer.go
+++ taikoxyz/taiko-geth/metrics/resetting_timer.go
@@ -1,22 +1,24 @@
package metrics
import (
- "math"
"sync"
"time"
-
- "golang.org/x/exp/slices"
)
// Initial slice capacity for the values stored in a ResettingTimer
const InitialResettingTimerSliceCap = 10
+type ResettingTimerSnapshot interface {
+ Count() int
+ Mean() float64
+ Max() int64
+ Min() int64
+ Percentiles([]float64) []float64
+}
+
// ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval.
type ResettingTimer interface {
- Values() []int64
- Snapshot() ResettingTimer
- Percentiles([]float64) []int64
- Mean() float64
+ Snapshot() ResettingTimerSnapshot
Time(func())
Update(time.Duration)
UpdateSince(time.Time)
@@ -52,70 +54,40 @@ }
}
// NilResettingTimer is a no-op ResettingTimer.
-type NilResettingTimer struct {
-}
-
-// Values is a no-op.
-func (NilResettingTimer) Values() []int64 { return nil }
-
-// Snapshot is a no-op.
-func (NilResettingTimer) Snapshot() ResettingTimer {
- return &ResettingTimerSnapshot{
- values: []int64{},
- }
-}
+type NilResettingTimer struct{}
-// Time is a no-op.
-func (NilResettingTimer) Time(f func()) { f() }
-
-// Update is a no-op.
-func (NilResettingTimer) Update(time.Duration) {}
-
-// Percentiles panics.
-func (NilResettingTimer) Percentiles([]float64) []int64 {
- panic("Percentiles called on a NilResettingTimer")
-}
-
-// Mean panics.
-func (NilResettingTimer) Mean() float64 {
- panic("Mean called on a NilResettingTimer")
-}
-
-// UpdateSince is a no-op.
-func (NilResettingTimer) UpdateSince(time.Time) {}
+func (NilResettingTimer) Values() []int64 { return nil }
+func (n NilResettingTimer) Snapshot() ResettingTimerSnapshot { return n }
+func (NilResettingTimer) Time(f func()) { f() }
+func (NilResettingTimer) Update(time.Duration) {}
+func (NilResettingTimer) Percentiles([]float64) []float64 { return nil }
+func (NilResettingTimer) Mean() float64 { return 0.0 }
+func (NilResettingTimer) Max() int64 { return 0 }
+func (NilResettingTimer) Min() int64 { return 0 }
+func (NilResettingTimer) UpdateSince(time.Time) {}
+func (NilResettingTimer) Count() int { return 0 }
// StandardResettingTimer is the standard implementation of a ResettingTimer.
// and Meter.
type StandardResettingTimer struct {
values []int64
- mutex sync.Mutex
-}
+ sum int64 // sum is a running total of all recorded values, used later to calculate the mean
-// Values returns a slice with all measurements.
-func (t *StandardResettingTimer) Values() []int64 {
- return t.values
+ mutex sync.Mutex
}
// Snapshot resets the timer and returns a read-only copy of its contents.
-func (t *StandardResettingTimer) Snapshot() ResettingTimer {
+func (t *StandardResettingTimer) Snapshot() ResettingTimerSnapshot {
t.mutex.Lock()
defer t.mutex.Unlock()
- currentValues := t.values
- t.values = make([]int64, 0, InitialResettingTimerSliceCap)
-
- return &ResettingTimerSnapshot{
- values: currentValues,
+ snapshot := &resettingTimerSnapshot{}
+ if len(t.values) > 0 {
+ snapshot.mean = float64(t.sum) / float64(len(t.values))
+ snapshot.values = t.values
+ t.values = make([]int64, 0, InitialResettingTimerSliceCap)
}
-}
-
-// Percentiles panics.
-func (t *StandardResettingTimer) Percentiles([]float64) []int64 {
- panic("Percentiles called on a StandardResettingTimer")
-}
-
-// Mean panics.
-func (t *StandardResettingTimer) Mean() float64 {
- panic("Mean called on a StandardResettingTimer")
+ t.sum = 0
+ return snapshot
}
// Record the duration of the execution of the given function.
@@ -130,106 +102,70 @@ func (t *StandardResettingTimer) Update(d time.Duration) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.values = append(t.values, int64(d))
+ t.sum += int64(d)
}
// Record the duration of an event that started at a time and ends now.
func (t *StandardResettingTimer) UpdateSince(ts time.Time) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.values = append(t.values, int64(time.Since(ts)))
+ t.Update(time.Since(ts))
}
-// ResettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
-type ResettingTimerSnapshot struct {
+// resettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
+type resettingTimerSnapshot struct {
values []int64
mean float64
- thresholdBoundaries []int64
+ max int64
+ min int64
+ thresholdBoundaries []float64
calculated bool
}
-// Snapshot returns the snapshot.
-func (t *ResettingTimerSnapshot) Snapshot() ResettingTimer { return t }
-
-// Time panics.
-func (*ResettingTimerSnapshot) Time(func()) {
- panic("Time called on a ResettingTimerSnapshot")
-}
-
-// Update panics.
-func (*ResettingTimerSnapshot) Update(time.Duration) {
- panic("Update called on a ResettingTimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*ResettingTimerSnapshot) UpdateSince(time.Time) {
- panic("UpdateSince called on a ResettingTimerSnapshot")
-}
-
-// Values returns all values from snapshot.
-func (t *ResettingTimerSnapshot) Values() []int64 {
- return t.values
+// Count returns the number of values in the snapshot.
+func (t *resettingTimerSnapshot) Count() int {
+ return len(t.values)
}
// Percentiles returns the boundaries for the input percentiles.
-func (t *ResettingTimerSnapshot) Percentiles(percentiles []float64) []int64 {
+// note: this method is not thread safe
+func (t *resettingTimerSnapshot) Percentiles(percentiles []float64) []float64 {
t.calc(percentiles)
-
return t.thresholdBoundaries
}
// Mean returns the mean of the snapshotted values
-func (t *ResettingTimerSnapshot) Mean() float64 {
+// note: this method is not thread safe
+func (t *resettingTimerSnapshot) Mean() float64 {
if !t.calculated {
- t.calc([]float64{})
+ t.calc(nil)
}
return t.mean
}
-func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
- slices.Sort(t.values)
-
- count := len(t.values)
- if count > 0 {
- min := t.values[0]
- max := t.values[count-1]
-
- cumulativeValues := make([]int64, count)
- cumulativeValues[0] = min
- for i := 1; i < count; i++ {
- cumulativeValues[i] = t.values[i] + cumulativeValues[i-1]
- }
-
- t.thresholdBoundaries = make([]int64, len(percentiles))
-
- thresholdBoundary := max
-
- for i, pct := range percentiles {
- if count > 1 {
- var abs float64
- if pct >= 0 {
- abs = pct
- } else {
- abs = 100 + pct
- }
- // poor man's math.Round(x):
- // math.Floor(x + 0.5)
- indexOfPerc := int(math.Floor(((abs / 100.0) * float64(count)) + 0.5))
- if pct >= 0 && indexOfPerc > 0 {
- indexOfPerc -= 1 // index offset=0
- }
- thresholdBoundary = t.values[indexOfPerc]
- }
-
- t.thresholdBoundaries[i] = thresholdBoundary
- }
+// Max returns the max of the snapshotted values
+// note: this method is not thread safe
+func (t *resettingTimerSnapshot) Max() int64 {
+ if !t.calculated {
+ t.calc(nil)
+ }
+ return t.max
+}
- sum := cumulativeValues[count-1]
- t.mean = float64(sum) / float64(count)
- } else {
- t.thresholdBoundaries = make([]int64, len(percentiles))
- t.mean = 0
+// Min returns the min of the snapshotted values
+// note: this method is not thread safe
+func (t *resettingTimerSnapshot) Min() int64 {
+ if !t.calculated {
+ t.calc(nil)
}
+ return t.min
+}
- t.calculated = true
+func (t *resettingTimerSnapshot) calc(percentiles []float64) {
+ scores := CalculatePercentiles(t.values, percentiles)
+ t.thresholdBoundaries = scores
+ if len(t.values) == 0 {
+ return
+ }
+ t.min = t.values[0]
+ t.max = t.values[len(t.values)-1]
}
diff --git ethereum/go-ethereum/metrics/resetting_timer_test.go taikoxyz/taiko-geth/metrics/resetting_timer_test.go
index 77c49dc3866aaead7be3703c9b2b7c0e267e9b4e..4571fc8eb052b6b3babcf166fc009a1eab03b683 100644
--- ethereum/go-ethereum/metrics/resetting_timer_test.go
+++ taikoxyz/taiko-geth/metrics/resetting_timer_test.go
@@ -10,9 +10,9 @@ tests := []struct {
values []int64
start int
end int
- wantP50 int64
- wantP95 int64
- wantP99 int64
+ wantP50 float64
+ wantP95 float64
+ wantP99 float64
wantMean float64
wantMin int64
wantMax int64
@@ -21,14 +21,14 @@ {
values: []int64{},
start: 1,
end: 11,
- wantP50: 5, wantP95: 10, wantP99: 10,
+ wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
{
values: []int64{},
start: 1,
end: 101,
- wantP50: 50, wantP95: 95, wantP99: 99,
+ wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99,
wantMin: 1, wantMax: 100, wantMean: 50.5,
},
{
@@ -56,11 +56,11 @@ {
values: []int64{1, 10},
start: 0,
end: 0,
- wantP50: 1, wantP95: 10, wantP99: 10,
+ wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
}
- for ind, tt := range tests {
+ for i, tt := range tests {
timer := NewResettingTimer()
for i := tt.start; i < tt.end; i++ {
@@ -70,37 +70,27 @@
for _, v := range tt.values {
timer.Update(time.Duration(v))
}
-
snap := timer.Snapshot()
- ps := snap.Percentiles([]float64{50, 95, 99})
+ ps := snap.Percentiles([]float64{0.50, 0.95, 0.99})
- val := snap.Values()
-
- if len(val) > 0 {
- if tt.wantMin != val[0] {
- t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin)
- }
-
- if tt.wantMax != val[len(val)-1] {
- t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax)
- }
+ if have, want := snap.Min(), tt.wantMin; have != want {
+ t.Fatalf("%d: min: have %d, want %d", i, have, want)
+ }
+ if have, want := snap.Max(), tt.wantMax; have != want {
+ t.Fatalf("%d: max: have %d, want %d", i, have, want)
}
-
- if tt.wantMean != snap.Mean() {
- t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
+ if have, want := snap.Mean(), tt.wantMean; have != want {
+ t.Fatalf("%d: mean: have %v, want %v", i, have, want)
}
-
- if tt.wantP50 != ps[0] {
- t.Fatalf("%d: p50: got %d, want %d", ind, ps[0], tt.wantP50)
+ if have, want := ps[0], tt.wantP50; have != want {
+ t.Errorf("%d: p50: have %v, want %v", i, have, want)
}
-
- if tt.wantP95 != ps[1] {
- t.Fatalf("%d: p95: got %d, want %d", ind, ps[1], tt.wantP95)
+ if have, want := ps[1], tt.wantP95; have != want {
+ t.Errorf("%d: p95: have %v, want %v", i, have, want)
}
-
- if tt.wantP99 != ps[2] {
- t.Fatalf("%d: p99: got %d, want %d", ind, ps[2], tt.wantP99)
+ if have, want := ps[2], tt.wantP99; have != want {
+ t.Errorf("%d: p99: have %v, want %v", i, have, want)
}
}
}
@@ -110,11 +100,11 @@ tests := []struct {
values []int64
start int
end int
- wantP05 int64
- wantP20 int64
- wantP50 int64
- wantP95 int64
- wantP99 int64
+ wantP05 float64
+ wantP20 float64
+ wantP50 float64
+ wantP95 float64
+ wantP99 float64
wantMean float64
wantMin int64
wantMax int64
@@ -123,14 +113,14 @@ {
values: []int64{},
start: 1,
end: 11,
- wantP05: 1, wantP20: 2, wantP50: 5, wantP95: 10, wantP99: 10,
+ wantP05: 1, wantP20: 2.2, wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
{
values: []int64{},
start: 1,
end: 101,
- wantP05: 5, wantP20: 20, wantP50: 50, wantP95: 95, wantP99: 99,
+ wantP05: 5.050000000000001, wantP20: 20.200000000000003, wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99,
wantMin: 1, wantMax: 100, wantMean: 50.5,
},
{
@@ -158,7 +148,7 @@ {
values: []int64{1, 10},
start: 0,
end: 0,
- wantP05: 1, wantP20: 1, wantP50: 1, wantP95: 10, wantP99: 10,
+ wantP05: 1, wantP20: 1, wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
}
@@ -175,42 +165,33 @@ }
snap := timer.Snapshot()
- ps := snap.Percentiles([]float64{5, 20, 50, 95, 99})
-
- val := snap.Values()
+ ps := snap.Percentiles([]float64{0.05, 0.20, 0.50, 0.95, 0.99})
- if len(val) > 0 {
- if tt.wantMin != val[0] {
- t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin)
- }
+ if tt.wantMin != snap.Min() {
+ t.Errorf("%d: min: got %d, want %d", ind, snap.Min(), tt.wantMin)
+ }
- if tt.wantMax != val[len(val)-1] {
- t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax)
- }
+ if tt.wantMax != snap.Max() {
+ t.Errorf("%d: max: got %d, want %d", ind, snap.Max(), tt.wantMax)
}
if tt.wantMean != snap.Mean() {
- t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
+ t.Errorf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
}
-
if tt.wantP05 != ps[0] {
- t.Fatalf("%d: p05: got %d, want %d", ind, ps[0], tt.wantP05)
+ t.Errorf("%d: p05: got %v, want %v", ind, ps[0], tt.wantP05)
}
-
if tt.wantP20 != ps[1] {
- t.Fatalf("%d: p20: got %d, want %d", ind, ps[1], tt.wantP20)
+ t.Errorf("%d: p20: got %v, want %v", ind, ps[1], tt.wantP20)
}
-
if tt.wantP50 != ps[2] {
- t.Fatalf("%d: p50: got %d, want %d", ind, ps[2], tt.wantP50)
+ t.Errorf("%d: p50: got %v, want %v", ind, ps[2], tt.wantP50)
}
-
if tt.wantP95 != ps[3] {
- t.Fatalf("%d: p95: got %d, want %d", ind, ps[3], tt.wantP95)
+ t.Errorf("%d: p95: got %v, want %v", ind, ps[3], tt.wantP95)
}
-
if tt.wantP99 != ps[4] {
- t.Fatalf("%d: p99: got %d, want %d", ind, ps[4], tt.wantP99)
+ t.Errorf("%d: p99: got %v, want %v", ind, ps[4], tt.wantP99)
}
}
}
diff --git ethereum/go-ethereum/metrics/runtimehistogram.go taikoxyz/taiko-geth/metrics/runtimehistogram.go
index c68939af1ef7278feb875deecb2e56f29d681541..92fcbcc2814c21030dad3e34044cf8849c0c8033 100644
--- ethereum/go-ethereum/metrics/runtimehistogram.go
+++ taikoxyz/taiko-geth/metrics/runtimehistogram.go
@@ -17,13 +17,19 @@ }
// runtimeHistogram wraps a runtime/metrics histogram.
type runtimeHistogram struct {
- v atomic.Value
+ v atomic.Value // v is a pointer to a metrics.Float64Histogram
scaleFactor float64
}
func newRuntimeHistogram(scale float64) *runtimeHistogram {
h := &runtimeHistogram{scaleFactor: scale}
- h.update(&metrics.Float64Histogram{})
+ h.update(new(metrics.Float64Histogram))
+ return h
+}
+
+func RuntimeHistogramFromData(scale float64, hist *metrics.Float64Histogram) *runtimeHistogram {
+ h := &runtimeHistogram{scaleFactor: scale}
+ h.update(hist)
return h
}
@@ -35,130 +41,107 @@ // conditionals everywhere.
return
}
- s := runtimeHistogramSnapshot{
+ s := metrics.Float64Histogram{
Counts: make([]uint64, len(mh.Counts)),
Buckets: make([]float64, len(mh.Buckets)),
}
copy(s.Counts, mh.Counts)
- copy(s.Buckets, mh.Buckets)
- for i, b := range s.Buckets {
+ for i, b := range mh.Buckets {
s.Buckets[i] = b * h.scaleFactor
}
h.v.Store(&s)
}
-func (h *runtimeHistogram) load() *runtimeHistogramSnapshot {
- return h.v.Load().(*runtimeHistogramSnapshot)
-}
-
func (h *runtimeHistogram) Clear() {
panic("runtimeHistogram does not support Clear")
}
func (h *runtimeHistogram) Update(int64) {
panic("runtimeHistogram does not support Update")
}
-func (h *runtimeHistogram) Sample() Sample {
- return NilSample{}
-}
-// Snapshot returns a non-changing cop of the histogram.
-func (h *runtimeHistogram) Snapshot() Histogram {
- return h.load()
+// Snapshot returns a non-changing copy of the histogram.
+func (h *runtimeHistogram) Snapshot() HistogramSnapshot {
+ hist := h.v.Load().(*metrics.Float64Histogram)
+ return newRuntimeHistogramSnapshot(hist)
}
-// Count returns the sample count.
-func (h *runtimeHistogram) Count() int64 {
- return h.load().Count()
+type runtimeHistogramSnapshot struct {
+ internal *metrics.Float64Histogram
+ calculated bool
+ // The following fields are (lazily) calculated based on 'internal'
+ mean float64
+ count int64
+ min int64 // min is the lowest sample value.
+ max int64 // max is the highest sample value.
+ variance float64
}
-// Mean returns an approximation of the mean.
-func (h *runtimeHistogram) Mean() float64 {
- return h.load().Mean()
+func newRuntimeHistogramSnapshot(h *metrics.Float64Histogram) *runtimeHistogramSnapshot {
+ return &runtimeHistogramSnapshot{
+ internal: h,
+ }
}
-// StdDev approximates the standard deviation of the histogram.
-func (h *runtimeHistogram) StdDev() float64 {
- return h.load().StdDev()
-}
-
-// Variance approximates the variance of the histogram.
-func (h *runtimeHistogram) Variance() float64 {
- return h.load().Variance()
-}
-
-// Percentile computes the p'th percentile value.
-func (h *runtimeHistogram) Percentile(p float64) float64 {
- return h.load().Percentile(p)
-}
-
-// Percentiles computes all requested percentile values.
-func (h *runtimeHistogram) Percentiles(ps []float64) []float64 {
- return h.load().Percentiles(ps)
-}
-
-// Max returns the highest sample value.
-func (h *runtimeHistogram) Max() int64 {
- return h.load().Max()
-}
-
-// Min returns the lowest sample value.
-func (h *runtimeHistogram) Min() int64 {
- return h.load().Min()
-}
-
-// Sum returns the sum of all sample values.
-func (h *runtimeHistogram) Sum() int64 {
- return h.load().Sum()
-}
-
-type runtimeHistogramSnapshot metrics.Float64Histogram
-
-func (h *runtimeHistogramSnapshot) Clear() {
- panic("runtimeHistogram does not support Clear")
-}
-func (h *runtimeHistogramSnapshot) Update(int64) {
- panic("runtimeHistogram does not support Update")
-}
-func (h *runtimeHistogramSnapshot) Sample() Sample {
- return NilSample{}
-}
-
-func (h *runtimeHistogramSnapshot) Snapshot() Histogram {
- return h
+// calc calculates the values for the snapshot. This method is not thread-safe.
+func (h *runtimeHistogramSnapshot) calc() {
+ h.calculated = true
+ var (
+ count int64 // number of samples
+ sum float64 // approx sum of all sample values
+ min int64
+ max float64
+ )
+ if len(h.internal.Counts) == 0 {
+ return
+ }
+ for i, c := range h.internal.Counts {
+ if c == 0 {
+ continue
+ }
+ if count == 0 { // Set min only first loop iteration
+ min = int64(math.Floor(h.internal.Buckets[i]))
+ }
+ count += int64(c)
+ sum += h.midpoint(i) * float64(c)
+ // Set max on every iteration
+ edge := h.internal.Buckets[i+1]
+ if math.IsInf(edge, 1) {
+ edge = h.internal.Buckets[i]
+ }
+ if edge > max {
+ max = edge
+ }
+ }
+ h.min = min
+ h.max = int64(max)
+ h.mean = sum / float64(count)
+ h.count = count
}
// Count returns the sample count.
func (h *runtimeHistogramSnapshot) Count() int64 {
- var count int64
- for _, c := range h.Counts {
- count += int64(c)
+ if !h.calculated {
+ h.calc()
}
- return count
+ return h.count
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (h *runtimeHistogramSnapshot) Size() int {
+ return len(h.internal.Counts)
}
// Mean returns an approximation of the mean.
func (h *runtimeHistogramSnapshot) Mean() float64 {
- if len(h.Counts) == 0 {
- return 0
+ if !h.calculated {
+ h.calc()
}
- mean, _ := h.mean()
- return mean
-}
-
-// mean computes the mean and also the total sample count.
-func (h *runtimeHistogramSnapshot) mean() (mean, totalCount float64) {
- var sum float64
- for i, c := range h.Counts {
- midpoint := h.midpoint(i)
- sum += midpoint * float64(c)
- totalCount += float64(c)
- }
- return sum / totalCount, totalCount
+ return h.mean
}
func (h *runtimeHistogramSnapshot) midpoint(bucket int) float64 {
- high := h.Buckets[bucket+1]
- low := h.Buckets[bucket]
+ high := h.internal.Buckets[bucket+1]
+ low := h.internal.Buckets[bucket]
if math.IsInf(high, 1) {
// The edge of the highest bucket can be +Inf, and it's supposed to mean that this
// bucket contains all remaining samples > low. We can't get the middle of an
@@ -180,23 +163,31 @@ }
// Variance approximates the variance of the histogram.
func (h *runtimeHistogramSnapshot) Variance() float64 {
- if len(h.Counts) == 0 {
+ if len(h.internal.Counts) == 0 {
return 0
}
-
- mean, totalCount := h.mean()
- if totalCount <= 1 {
+ if !h.calculated {
+ h.calc()
+ }
+ if h.count <= 1 {
// There is no variance when there are zero or one items.
return 0
}
-
+ // Variance is not calculated in 'calc', because it requires a second iteration.
+ // Therefore we calculate it lazily in this method, triggered either by
+ // a direct call to Variance or via StdDev.
+ if h.variance != 0.0 {
+ return h.variance
+ }
var sum float64
- for i, c := range h.Counts {
+
+ for i, c := range h.internal.Counts {
midpoint := h.midpoint(i)
- d := midpoint - mean
+ d := midpoint - h.mean
sum += float64(c) * (d * d)
}
- return sum / (totalCount - 1)
+ h.variance = sum / float64(h.count-1)
+ return h.variance
}
// Percentile computes the p'th percentile value.
@@ -231,11 +222,11 @@ }
func (h *runtimeHistogramSnapshot) computePercentiles(thresh []float64) {
var totalCount float64
- for i, count := range h.Counts {
+ for i, count := range h.internal.Counts {
totalCount += float64(count)
for len(thresh) > 0 && thresh[0] < totalCount {
- thresh[0] = h.Buckets[i]
+ thresh[0] = h.internal.Buckets[i]
thresh = thresh[1:]
}
if len(thresh) == 0 {
@@ -250,34 +241,25 @@ // also doesn't keep track of individual samples, so results are approximated.
// Max returns the highest sample value.
func (h *runtimeHistogramSnapshot) Max() int64 {
- for i := len(h.Counts) - 1; i >= 0; i-- {
- count := h.Counts[i]
- if count > 0 {
- edge := h.Buckets[i+1]
- if math.IsInf(edge, 1) {
- edge = h.Buckets[i]
- }
- return int64(math.Ceil(edge))
- }
+ if !h.calculated {
+ h.calc()
}
- return 0
+ return h.max
}
// Min returns the lowest sample value.
func (h *runtimeHistogramSnapshot) Min() int64 {
- for i, count := range h.Counts {
- if count > 0 {
- return int64(math.Floor(h.Buckets[i]))
- }
+ if !h.calculated {
+ h.calc()
}
- return 0
+ return h.min
}
// Sum returns the sum of all sample values.
func (h *runtimeHistogramSnapshot) Sum() int64 {
var sum float64
- for i := range h.Counts {
- sum += h.Buckets[i] * float64(h.Counts[i])
+ for i := range h.internal.Counts {
+ sum += h.internal.Buckets[i] * float64(h.internal.Counts[i])
}
return int64(math.Ceil(sum))
}
diff --git ethereum/go-ethereum/metrics/runtimehistogram_test.go taikoxyz/taiko-geth/metrics/runtimehistogram_test.go
index d53a01438311bbd5e58e7102b858c34dc397a1b6..cf7e36420ae9240ad88e031969f2986684362748 100644
--- ethereum/go-ethereum/metrics/runtimehistogram_test.go
+++ taikoxyz/taiko-geth/metrics/runtimehistogram_test.go
@@ -1,11 +1,14 @@
package metrics
import (
+ "bytes"
+ "encoding/gob"
"fmt"
"math"
"reflect"
"runtime/metrics"
"testing"
+ "time"
)
var _ Histogram = (*runtimeHistogram)(nil)
@@ -74,7 +77,7 @@ }
for i, test := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
- s := runtimeHistogramSnapshot(test.h)
+ s := RuntimeHistogramFromData(1.0, &test.h).Snapshot()
if v := s.Count(); v != test.Count {
t.Errorf("Count() = %v, want %v", v, test.Count)
@@ -121,13 +124,39 @@
// This test verifies that requesting Percentiles in unsorted order
// returns them in the requested order.
func TestRuntimeHistogramStatsPercentileOrder(t *testing.T) {
- p := runtimeHistogramSnapshot{
+ s := RuntimeHistogramFromData(1.0, &metrics.Float64Histogram{
Counts: []uint64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
Buckets: []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
- }
- result := p.Percentiles([]float64{1, 0.2, 0.5, 0.1, 0.2})
+ }).Snapshot()
+ result := s.Percentiles([]float64{1, 0.2, 0.5, 0.1, 0.2})
expected := []float64{10, 2, 5, 1, 2}
if !reflect.DeepEqual(result, expected) {
t.Fatal("wrong result:", result)
}
}
+
+func BenchmarkRuntimeHistogramSnapshotRead(b *testing.B) {
+ var sLatency = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06T\xff\x82\x01\xff\xa2\x00\xfe\r\xef\x00\x01\x02\x02\x04\x05\x04\b\x15\x17 B?6.L;$!2) \x1a? \x190aH7FY6#\x190\x1d\x14\x10\x1b\r\t\x04\x03\x01\x01\x00\x03\x02\x00\x03\x05\x05\x02\x02\x06\x04\v\x06\n\x15\x18\x13'&.\x12=H/L&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\
xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84
\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00"
+
+ dserialize := func(data string) *metrics.Float64Histogram {
+ var res metrics.Float64Histogram
+ if err := gob.NewDecoder(bytes.NewReader([]byte(data))).Decode(&res); err != nil {
+ panic(err)
+ }
+ return &res
+ }
+ latency := RuntimeHistogramFromData(float64(time.Second), dserialize(sLatency))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ snap := latency.Snapshot()
+ // These are the fields that influxdb accesses
+ _ = snap.Count()
+ _ = snap.Max()
+ _ = snap.Mean()
+ _ = snap.Min()
+ _ = snap.StdDev()
+ _ = snap.Variance()
+ _ = snap.Percentiles([]float64{0.25, 0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
+ }
+}
diff --git ethereum/go-ethereum/metrics/sample.go taikoxyz/taiko-geth/metrics/sample.go
index 252a878f581ba3697668a818e97709c6ebaebf50..5398dd42d5de28675e981d8003933ccd64dc245a 100644
--- ethereum/go-ethereum/metrics/sample.go
+++ taikoxyz/taiko-geth/metrics/sample.go
@@ -11,10 +11,7 @@ )
const rescaleThreshold = time.Hour
-// Samples maintain a statistically-significant selection of values from
-// a stream.
-type Sample interface {
- Clear()
+type SampleSnapshot interface {
Count() int64
Max() int64
Mean() float64
@@ -22,12 +19,17 @@ Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Size() int
- Snapshot() Sample
StdDev() float64
Sum() int64
- Update(int64)
- Values() []int64
Variance() float64
+}
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+ Snapshot() SampleSnapshot
+ Clear()
+ Update(int64)
}
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
@@ -77,72 +79,29 @@ s.t1 = s.t0.Add(rescaleThreshold)
s.values.Clear()
}
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *ExpDecaySample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Max() int64 {
- return SampleMax(s.Values())
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *ExpDecaySample) Mean() float64 {
- return SampleMean(s.Values())
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Min() int64 {
- return SampleMin(s.Values())
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *ExpDecaySample) Percentile(p float64) float64 {
- return SamplePercentile(s.Values(), p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.Values(), ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *ExpDecaySample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.values.Size()
-}
-
// Snapshot returns a read-only copy of the sample.
-func (s *ExpDecaySample) Snapshot() Sample {
+func (s *ExpDecaySample) Snapshot() SampleSnapshot {
s.mutex.Lock()
defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return &SampleSnapshot{
- count: s.count,
- values: values,
+ var (
+ samples = s.values.Values()
+ values = make([]int64, len(samples))
+ max int64 = math.MinInt64
+ min int64 = math.MaxInt64
+ sum int64
+ )
+ for i, item := range samples {
+ v := item.v
+ values[i] = v
+ sum += v
+ if v > max {
+ max = v
+ }
+ if v < min {
+ min = v
+ }
}
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *ExpDecaySample) StdDev() float64 {
- return SampleStdDev(s.Values())
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *ExpDecaySample) Sum() int64 {
- return SampleSum(s.Values())
+ return newSampleSnapshotPrecalculated(s.count, values, min, max, sum)
}
// Update samples a new value.
@@ -150,23 +109,6 @@ func (s *ExpDecaySample) Update(v int64) {
s.update(time.Now(), v)
}
-// Values returns a copy of the values in the sample.
-func (s *ExpDecaySample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *ExpDecaySample) Variance() float64 {
- return SampleVariance(s.Values())
-}
-
// update samples a new value at a particular timestamp. This is a method all
// its own to facilitate testing.
func (s *ExpDecaySample) update(t time.Time, v int64) {
@@ -202,207 +144,160 @@
// NilSample is a no-op Sample.
type NilSample struct{}
-// Clear is a no-op.
-func (NilSample) Clear() {}
-
-// Count is a no-op.
-func (NilSample) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilSample) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilSample) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilSample) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilSample) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilSample) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Size is a no-op.
-func (NilSample) Size() int { return 0 }
-
-// Sample is a no-op.
-func (NilSample) Snapshot() Sample { return NilSample{} }
-
-// StdDev is a no-op.
-func (NilSample) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilSample) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilSample) Update(v int64) {}
-
-// Values is a no-op.
-func (NilSample) Values() []int64 { return []int64{} }
-
-// Variance is a no-op.
-func (NilSample) Variance() float64 { return 0.0 }
-
-// SampleMax returns the maximum value of the slice of int64.
-func SampleMax(values []int64) int64 {
- if len(values) == 0 {
- return 0
- }
- var max int64 = math.MinInt64
- for _, v := range values {
- if max < v {
- max = v
- }
- }
- return max
-}
-
-// SampleMean returns the mean value of the slice of int64.
-func SampleMean(values []int64) float64 {
- if len(values) == 0 {
- return 0.0
- }
- return float64(SampleSum(values)) / float64(len(values))
-}
-
-// SampleMin returns the minimum value of the slice of int64.
-func SampleMin(values []int64) int64 {
- if len(values) == 0 {
- return 0
- }
- var min int64 = math.MaxInt64
- for _, v := range values {
- if min > v {
- min = v
- }
- }
- return min
-}
+func (NilSample) Clear() {}
+func (NilSample) Snapshot() SampleSnapshot { return (*emptySnapshot)(nil) }
+func (NilSample) Update(v int64) {}
// SamplePercentiles returns an arbitrary percentile of the slice of int64.
func SamplePercentile(values []int64, p float64) float64 {
- return SamplePercentiles(values, []float64{p})[0]
+ return CalculatePercentiles(values, []float64{p})[0]
}
-// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
-// int64.
-func SamplePercentiles(values []int64, ps []float64) []float64 {
+// CalculatePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64. This method returns interpolated results, so e.g. if there are only two
+// values, [0, 10], a 50% percentile will land between them.
+//
+// Note: As a side-effect, this method will also sort the slice of values.
+// Note2: The input format for percentiles is NOT percent! To express 50%, use 0.5, not 50.
+func CalculatePercentiles(values []int64, ps []float64) []float64 {
scores := make([]float64, len(ps))
size := len(values)
- if size > 0 {
- slices.Sort(values)
- for i, p := range ps {
- pos := p * float64(size+1)
- if pos < 1.0 {
- scores[i] = float64(values[0])
- } else if pos >= float64(size) {
- scores[i] = float64(values[size-1])
- } else {
- lower := float64(values[int(pos)-1])
- upper := float64(values[int(pos)])
- scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
- }
+ if size == 0 {
+ return scores
+ }
+ slices.Sort(values)
+ for i, p := range ps {
+ pos := p * float64(size+1)
+
+ if pos < 1.0 {
+ scores[i] = float64(values[0])
+ } else if pos >= float64(size) {
+ scores[i] = float64(values[size-1])
+ } else {
+ lower := float64(values[int(pos)-1])
+ upper := float64(values[int(pos)])
+ scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
}
}
return scores
}
-// SampleSnapshot is a read-only copy of another Sample.
-type SampleSnapshot struct {
+// sampleSnapshot is a read-only copy of another Sample.
+type sampleSnapshot struct {
count int64
values []int64
+
+ max int64
+ min int64
+ mean float64
+ sum int64
+ variance float64
}
-func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
- return &SampleSnapshot{
+// newSampleSnapshotPrecalculated creates a read-only sampleSnapshot, using
+// precalculated sums to avoid iterating the values.
+func newSampleSnapshotPrecalculated(count int64, values []int64, min, max, sum int64) *sampleSnapshot {
+ if len(values) == 0 {
+ return &sampleSnapshot{
+ count: count,
+ values: values,
+ }
+ }
+ return &sampleSnapshot{
count: count,
values: values,
+ max: max,
+ min: min,
+ mean: float64(sum) / float64(len(values)),
+ sum: sum,
}
}
-// Clear panics.
-func (*SampleSnapshot) Clear() {
- panic("Clear called on a SampleSnapshot")
+// newSampleSnapshot creates a read-only sampleSnapshot, iterating the values
+// once to derive the min, max and sum.
+func newSampleSnapshot(count int64, values []int64) *sampleSnapshot {
+ var (
+ max int64 = math.MinInt64
+ min int64 = math.MaxInt64
+ sum int64
+ )
+ for _, v := range values {
+ sum += v
+ if v > max {
+ max = v
+ }
+ if v < min {
+ min = v
+ }
+ }
+ return newSampleSnapshotPrecalculated(count, values, min, max, sum)
}
// Count returns the count of inputs at the time the snapshot was taken.
-func (s *SampleSnapshot) Count() int64 { return s.count }
+func (s *sampleSnapshot) Count() int64 { return s.count }
// Max returns the maximal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+func (s *sampleSnapshot) Max() int64 { return s.max }
// Mean returns the mean value at the time the snapshot was taken.
-func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+func (s *sampleSnapshot) Mean() float64 { return s.mean }
// Min returns the minimal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+func (s *sampleSnapshot) Min() int64 { return s.min }
// Percentile returns an arbitrary percentile of values at the time the
// snapshot was taken.
-func (s *SampleSnapshot) Percentile(p float64) float64 {
+func (s *sampleSnapshot) Percentile(p float64) float64 {
return SamplePercentile(s.values, p)
}
// Percentiles returns a slice of arbitrary percentiles of values at the time
// the snapshot was taken.
-func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.values, ps)
+func (s *sampleSnapshot) Percentiles(ps []float64) []float64 {
+ return CalculatePercentiles(s.values, ps)
}
// Size returns the size of the sample at the time the snapshot was taken.
-func (s *SampleSnapshot) Size() int { return len(s.values) }
+func (s *sampleSnapshot) Size() int { return len(s.values) }
// Snapshot returns the snapshot.
-func (s *SampleSnapshot) Snapshot() Sample { return s }
+func (s *sampleSnapshot) Snapshot() SampleSnapshot { return s }
// StdDev returns the standard deviation of values at the time the snapshot was
// taken.
-func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+func (s *sampleSnapshot) StdDev() float64 {
+ if s.variance == 0.0 {
+ s.variance = SampleVariance(s.mean, s.values)
+ }
+ return math.Sqrt(s.variance)
+}
// Sum returns the sum of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
-
-// Update panics.
-func (*SampleSnapshot) Update(int64) {
- panic("Update called on a SampleSnapshot")
-}
+func (s *sampleSnapshot) Sum() int64 { return s.sum }
// Values returns a copy of the values in the sample.
-func (s *SampleSnapshot) Values() []int64 {
+func (s *sampleSnapshot) Values() []int64 {
values := make([]int64, len(s.values))
copy(values, s.values)
return values
}
// Variance returns the variance of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
-
-// SampleStdDev returns the standard deviation of the slice of int64.
-func SampleStdDev(values []int64) float64 {
- return math.Sqrt(SampleVariance(values))
-}
-
-// SampleSum returns the sum of the slice of int64.
-func SampleSum(values []int64) int64 {
- var sum int64
- for _, v := range values {
- sum += v
+func (s *sampleSnapshot) Variance() float64 {
+ if s.variance == 0.0 {
+ s.variance = SampleVariance(s.mean, s.values)
}
- return sum
+ return s.variance
}
// SampleVariance returns the variance of the slice of int64.
-func SampleVariance(values []int64) float64 {
+func SampleVariance(mean float64, values []int64) float64 {
if len(values) == 0 {
return 0.0
}
- m := SampleMean(values)
var sum float64
for _, v := range values {
- d := float64(v) - m
+ d := float64(v) - mean
sum += d * d
}
return sum / float64(len(values))
@@ -445,83 +340,14 @@ s.count = 0
s.values = make([]int64, 0, s.reservoirSize)
}
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *UniformSample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *UniformSample) Max() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMax(s.values)
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *UniformSample) Mean() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMean(s.values)
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *UniformSample) Min() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMin(s.values)
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *UniformSample) Percentile(p float64) float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *UniformSample) Percentiles(ps []float64) []float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *UniformSample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return len(s.values)
-}
-
// Snapshot returns a read-only copy of the sample.
-func (s *UniformSample) Snapshot() Sample {
+func (s *UniformSample) Snapshot() SampleSnapshot {
s.mutex.Lock()
- defer s.mutex.Unlock()
values := make([]int64, len(s.values))
copy(values, s.values)
- return &SampleSnapshot{
- count: s.count,
- values: values,
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *UniformSample) StdDev() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleStdDev(s.values)
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *UniformSample) Sum() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleSum(s.values)
+ count := s.count
+ s.mutex.Unlock()
+ return newSampleSnapshot(count, values)
}
// Update samples a new value.
@@ -542,22 +368,6 @@ if r < int64(len(s.values)) {
s.values[int(r)] = v
}
}
-}
-
-// Values returns a copy of the values in the sample.
-func (s *UniformSample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *UniformSample) Variance() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleVariance(s.values)
}
// expDecaySample represents an individual sample in a heap.
diff --git ethereum/go-ethereum/metrics/sample_test.go taikoxyz/taiko-geth/metrics/sample_test.go
index 3ae128d56f67074030818d1f8f1b6f653a9105de..79673570554cb4e681ec185321077027fdbed2bf 100644
--- ethereum/go-ethereum/metrics/sample_test.go
+++ taikoxyz/taiko-geth/metrics/sample_test.go
@@ -8,28 +8,36 @@ "testing"
"time"
)
+const epsilonPercentile = .00000000001
+
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
s := make([]int64, 1000)
+ var sum int64
for i := 0; i < len(s); i++ {
s[i] = int64(i)
+ sum += int64(i)
}
+ mean := float64(sum) / float64(len(s))
b.ResetTimer()
for i := 0; i < b.N; i++ {
- SampleVariance(s)
+ SampleVariance(mean, s)
}
}
func BenchmarkCompute1000000(b *testing.B) {
s := make([]int64, 1000000)
+ var sum int64
for i := 0; i < len(s); i++ {
s[i] = int64(i)
+ sum += int64(i)
}
+ mean := float64(sum) / float64(len(s))
b.ResetTimer()
for i := 0; i < b.N; i++ {
- SampleVariance(s)
+ SampleVariance(mean, s)
}
}
func BenchmarkCopy1000(b *testing.B) {
@@ -79,65 +87,42 @@ func BenchmarkUniformSample1028(b *testing.B) {
benchmarkSample(b, NewUniformSample(1028))
}
-func TestExpDecaySample10(t *testing.T) {
- s := NewExpDecaySample(100, 0.99)
- for i := 0; i < 10; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); size != 10 {
- t.Errorf("s.Count(): 10 != %v\n", size)
- }
- if size := s.Size(); size != 10 {
- t.Errorf("s.Size(): 10 != %v\n", size)
- }
- if l := len(s.Values()); l != 10 {
- t.Errorf("len(s.Values()): 10 != %v\n", l)
+func min(a, b int) int {
+ if a < b {
+ return a
}
- for _, v := range s.Values() {
- if v > 10 || v < 0 {
- t.Errorf("out of range [0, 10): %v\n", v)
- }
- }
+ return b
}
-func TestExpDecaySample100(t *testing.T) {
- s := NewExpDecaySample(1000, 0.01)
- for i := 0; i < 100; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); size != 100 {
- t.Errorf("s.Count(): 100 != %v\n", size)
- }
- if size := s.Size(); size != 100 {
- t.Errorf("s.Size(): 100 != %v\n", size)
- }
- if l := len(s.Values()); l != 100 {
- t.Errorf("len(s.Values()): 100 != %v\n", l)
- }
- for _, v := range s.Values() {
- if v > 100 || v < 0 {
- t.Errorf("out of range [0, 100): %v\n", v)
+func TestExpDecaySample(t *testing.T) {
+ for _, tc := range []struct {
+ reservoirSize int
+ alpha float64
+ updates int
+ }{
+ {100, 0.99, 10},
+ {1000, 0.01, 100},
+ {100, 0.99, 1000},
+ } {
+ sample := NewExpDecaySample(tc.reservoirSize, tc.alpha)
+ for i := 0; i < tc.updates; i++ {
+ sample.Update(int64(i))
}
- }
-}
-
-func TestExpDecaySample1000(t *testing.T) {
- s := NewExpDecaySample(100, 0.99)
- for i := 0; i < 1000; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); size != 1000 {
- t.Errorf("s.Count(): 1000 != %v\n", size)
- }
- if size := s.Size(); size != 100 {
- t.Errorf("s.Size(): 100 != %v\n", size)
- }
- if l := len(s.Values()); l != 100 {
- t.Errorf("len(s.Values()): 100 != %v\n", l)
- }
- for _, v := range s.Values() {
- if v > 1000 || v < 0 {
- t.Errorf("out of range [0, 1000): %v\n", v)
+ snap := sample.Snapshot()
+ if have, want := int(snap.Count()), tc.updates; have != want {
+ t.Errorf("have %d want %d", have, want)
+ }
+ if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want {
+ t.Errorf("have %d want %d", have, want)
+ }
+ values := snap.(*sampleSnapshot).values
+ if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want {
+ t.Errorf("have %d want %d", have, want)
+ }
+ for _, v := range values {
+ if v > int64(tc.updates) || v < 0 {
+ t.Errorf("out of range [0, %d): %v", tc.updates, v)
+ }
}
}
}
@@ -147,15 +132,16 @@ // nanosecond duration since start rather than second duration since start.
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
- s := NewExpDecaySample(100, 0.99)
+ sw := NewExpDecaySample(100, 0.99)
for i := 0; i < 100; i++ {
- s.Update(10)
+ sw.Update(10)
}
time.Sleep(1 * time.Millisecond)
for i := 0; i < 100; i++ {
- s.Update(20)
+ sw.Update(20)
}
- v := s.Values()
+ s := sw.Snapshot()
+ v := s.(*sampleSnapshot).values
avg := float64(0)
for i := 0; i < len(v); i++ {
avg += float64(v[i])
@@ -194,24 +180,27 @@ s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
- testExpDecaySampleStatistics(t, s)
+ testExpDecaySampleStatistics(t, s.Snapshot())
}
func TestUniformSample(t *testing.T) {
- s := NewUniformSample(100)
+ sw := NewUniformSample(100)
for i := 0; i < 1000; i++ {
- s.Update(int64(i))
+ sw.Update(int64(i))
}
+ s := sw.Snapshot()
if size := s.Count(); size != 1000 {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
if size := s.Size(); size != 100 {
t.Errorf("s.Size(): 100 != %v\n", size)
}
- if l := len(s.Values()); l != 100 {
+ values := s.(*sampleSnapshot).values
+
+ if l := len(values); l != 100 {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
- for _, v := range s.Values() {
+ for _, v := range values {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
}
@@ -219,12 +208,13 @@ }
}
func TestUniformSampleIncludesTail(t *testing.T) {
- s := NewUniformSample(100)
+ sw := NewUniformSample(100)
max := 100
for i := 0; i < max; i++ {
- s.Update(int64(i))
+ sw.Update(int64(i))
}
- v := s.Values()
+ s := sw.Snapshot()
+ v := s.(*sampleSnapshot).values
sum := 0
exp := (max - 1) * max / 2
for i := 0; i < len(v); i++ {
@@ -250,7 +240,7 @@ s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
- testUniformSampleStatistics(t, s)
+ testUniformSampleStatistics(t, s.Snapshot())
}
func benchmarkSample(b *testing.B, s Sample) {
@@ -267,7 +257,7 @@ runtime.ReadMemStats(&memStats)
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
-func testExpDecaySampleStatistics(t *testing.T, s Sample) {
+func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) {
if count := s.Count(); count != 10000 {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
@@ -295,7 +285,7 @@ t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
}
}
-func testUniformSampleStatistics(t *testing.T, s Sample) {
+func testUniformSampleStatistics(t *testing.T, s SampleSnapshot) {
if count := s.Count(); count != 10000 {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
@@ -349,8 +339,22 @@ }
}
}()
for i := 0; i < 1000; i++ {
- s.Count()
+ s.Snapshot().Count()
time.Sleep(5 * time.Millisecond)
}
quit <- struct{}{}
}
+
+func BenchmarkCalculatePercentiles(b *testing.B) {
+ pss := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
+ var vals []int64
+ for i := 0; i < 1000; i++ {
+ vals = append(vals, int64(rand.Int31()))
+ }
+ v := make([]int64, len(vals))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ copy(v, vals)
+ _ = CalculatePercentiles(v, pss)
+ }
+}
diff --git ethereum/go-ethereum/metrics/syslog.go taikoxyz/taiko-geth/metrics/syslog.go
index f23b07e199f32d6e1c392404603fc30f140cb72c..fd856d69731616fc0e33d95069c8222fa7b1f8bf 100644
--- ethereum/go-ethereum/metrics/syslog.go
+++ taikoxyz/taiko-geth/metrics/syslog.go
@@ -16,13 +16,15 @@ for range time.Tick(d) {
r.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
- w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+ w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Snapshot().Count()))
case CounterFloat64:
- w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Count()))
+ w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Snapshot().Count()))
case Gauge:
- w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+ w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Snapshot().Value()))
case GaugeFloat64:
- w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+ w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Snapshot().Value()))
+ case GaugeInfo:
+ w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Snapshot().Value()))
case Healthcheck:
metric.Check()
w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
diff --git ethereum/go-ethereum/metrics/testdata/opentsb.want taikoxyz/taiko-geth/metrics/testdata/opentsb.want
new file mode 100644
index 0000000000000000000000000000000000000000..43fe1b2ac27a82f9d140de6bf22415fe7db11dc8
--- /dev/null
+++ taikoxyz/taiko-geth/metrics/testdata/opentsb.want
@@ -0,0 +1,23 @@
+put pre.elite.count 978307200 1337 host=hal9000
+put pre.elite.one-minute 978307200 0.00 host=hal9000
+put pre.elite.five-minute 978307200 0.00 host=hal9000
+put pre.elite.fifteen-minute 978307200 0.00 host=hal9000
+put pre.elite.mean 978307200 0.00 host=hal9000
+put pre.foo.value 978307200 {"chain_id":"5"} host=hal9000
+put pre.months.count 978307200 12 host=hal9000
+put pre.pi.value 978307200 3.140000 host=hal9000
+put pre.second.count 978307200 1 host=hal9000
+put pre.second.min 978307200 1000 host=hal9000
+put pre.second.max 978307200 1000 host=hal9000
+put pre.second.mean 978307200 1000.00 host=hal9000
+put pre.second.std-dev 978307200 0.00 host=hal9000
+put pre.second.50-percentile 978307200 1000.00 host=hal9000
+put pre.second.75-percentile 978307200 1000.00 host=hal9000
+put pre.second.95-percentile 978307200 1000.00 host=hal9000
+put pre.second.99-percentile 978307200 1000.00 host=hal9000
+put pre.second.999-percentile 978307200 1000.00 host=hal9000
+put pre.second.one-minute 978307200 0.00 host=hal9000
+put pre.second.five-minute 978307200 0.00 host=hal9000
+put pre.second.fifteen-minute 978307200 0.00 host=hal9000
+put pre.second.mean-rate 978307200 0.00 host=hal9000
+put pre.tau.count 978307200 1.570000 host=hal9000
diff --git ethereum/go-ethereum/metrics/timer.go taikoxyz/taiko-geth/metrics/timer.go
index 2e1a9be47295f082b0ebabd97f9b44f7a1f691d5..576ad8aa3e63f9cfc89f9904a74a7e264c337efa 100644
--- ethereum/go-ethereum/metrics/timer.go
+++ taikoxyz/taiko-geth/metrics/timer.go
@@ -5,26 +5,18 @@ "sync"
"time"
)
+type TimerSnapshot interface {
+ HistogramSnapshot
+ MeterSnapshot
+}
+
// Timers capture the duration and rate of events.
type Timer interface {
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Rate1() float64
- Rate5() float64
- Rate15() float64
- RateMean() float64
- Snapshot() Timer
- StdDev() float64
+ Snapshot() TimerSnapshot
Stop()
- Sum() int64
Time(func())
+ UpdateSince(time.Time)
Update(time.Duration)
- UpdateSince(time.Time)
- Variance() float64
}
// GetOrRegisterTimer returns an existing Timer or constructs and registers a
@@ -78,61 +70,11 @@
// NilTimer is a no-op Timer.
type NilTimer struct{}
-// Count is a no-op.
-func (NilTimer) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilTimer) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilTimer) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilTimer) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilTimer) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilTimer) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Rate1 is a no-op.
-func (NilTimer) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilTimer) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilTimer) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilTimer) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilTimer) Snapshot() Timer { return NilTimer{} }
-
-// StdDev is a no-op.
-func (NilTimer) StdDev() float64 { return 0.0 }
-
-// Stop is a no-op.
-func (NilTimer) Stop() {}
-
-// Sum is a no-op.
-func (NilTimer) Sum() int64 { return 0 }
-
-// Time is a no-op.
-func (NilTimer) Time(f func()) { f() }
-
-// Update is a no-op.
-func (NilTimer) Update(time.Duration) {}
-
-// UpdateSince is a no-op.
-func (NilTimer) UpdateSince(time.Time) {}
-
-// Variance is a no-op.
-func (NilTimer) Variance() float64 { return 0.0 }
+func (NilTimer) Snapshot() TimerSnapshot { return (*emptySnapshot)(nil) }
+func (NilTimer) Stop() {}
+func (NilTimer) Time(f func()) { f() }
+func (NilTimer) Update(time.Duration) {}
+func (NilTimer) UpdateSince(time.Time) {}
// StandardTimer is the standard implementation of a Timer and uses a Histogram
// and Meter.
@@ -142,82 +84,21 @@ meter Meter
mutex sync.Mutex
}
-// Count returns the number of events recorded.
-func (t *StandardTimer) Count() int64 {
- return t.histogram.Count()
-}
-
-// Max returns the maximum value in the sample.
-func (t *StandardTimer) Max() int64 {
- return t.histogram.Max()
-}
-
-// Mean returns the mean of the values in the sample.
-func (t *StandardTimer) Mean() float64 {
- return t.histogram.Mean()
-}
-
-// Min returns the minimum value in the sample.
-func (t *StandardTimer) Min() int64 {
- return t.histogram.Min()
-}
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (t *StandardTimer) Percentile(p float64) float64 {
- return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (t *StandardTimer) Percentiles(ps []float64) []float64 {
- return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (t *StandardTimer) Rate1() float64 {
- return t.meter.Rate1()
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (t *StandardTimer) Rate5() float64 {
- return t.meter.Rate5()
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (t *StandardTimer) Rate15() float64 {
- return t.meter.Rate15()
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (t *StandardTimer) RateMean() float64 {
- return t.meter.RateMean()
-}
-
// Snapshot returns a read-only copy of the timer.
-func (t *StandardTimer) Snapshot() Timer {
+func (t *StandardTimer) Snapshot() TimerSnapshot {
t.mutex.Lock()
defer t.mutex.Unlock()
- return &TimerSnapshot{
- histogram: t.histogram.Snapshot().(*HistogramSnapshot),
- meter: t.meter.Snapshot().(*MeterSnapshot),
+ return &timerSnapshot{
+ histogram: t.histogram.Snapshot(),
+ meter: t.meter.Snapshot(),
}
}
-// StdDev returns the standard deviation of the values in the sample.
-func (t *StandardTimer) StdDev() float64 {
- return t.histogram.StdDev()
-}
-
// Stop stops the meter.
func (t *StandardTimer) Stop() {
t.meter.Stop()
}
-// Sum returns the sum in the sample.
-func (t *StandardTimer) Sum() int64 {
- return t.histogram.Sum()
-}
-
// Record the duration of the execution of the given function.
func (t *StandardTimer) Time(f func()) {
ts := time.Now()
@@ -241,86 +122,63 @@ t.histogram.Update(int64(time.Since(ts)))
t.meter.Mark(1)
}
-// Variance returns the variance of the values in the sample.
-func (t *StandardTimer) Variance() float64 {
- return t.histogram.Variance()
-}
-
-// TimerSnapshot is a read-only copy of another Timer.
-type TimerSnapshot struct {
- histogram *HistogramSnapshot
- meter *MeterSnapshot
+// timerSnapshot is a read-only copy of another Timer.
+type timerSnapshot struct {
+ histogram HistogramSnapshot
+ meter MeterSnapshot
}
// Count returns the number of events recorded at the time the snapshot was
// taken.
-func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+func (t *timerSnapshot) Count() int64 { return t.histogram.Count() }
// Max returns the maximum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+func (t *timerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (t *timerSnapshot) Size() int { return t.histogram.Size() }
// Mean returns the mean value at the time the snapshot was taken.
-func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+func (t *timerSnapshot) Mean() float64 { return t.histogram.Mean() }
// Min returns the minimum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+func (t *timerSnapshot) Min() int64 { return t.histogram.Min() }
// Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken.
-func (t *TimerSnapshot) Percentile(p float64) float64 {
+func (t *timerSnapshot) Percentile(p float64) float64 {
return t.histogram.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken.
-func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+func (t *timerSnapshot) Percentiles(ps []float64) []float64 {
return t.histogram.Percentiles(ps)
}
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
-func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+func (t *timerSnapshot) Rate1() float64 { return t.meter.Rate1() }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
-func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+func (t *timerSnapshot) Rate5() float64 { return t.meter.Rate5() }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
-func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+func (t *timerSnapshot) Rate15() float64 { return t.meter.Rate15() }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
-func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
-
-// Snapshot returns the snapshot.
-func (t *TimerSnapshot) Snapshot() Timer { return t }
+func (t *timerSnapshot) RateMean() float64 { return t.meter.RateMean() }
// StdDev returns the standard deviation of the values at the time the snapshot
// was taken.
-func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
-
-// Stop is a no-op.
-func (t *TimerSnapshot) Stop() {}
+func (t *timerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
// Sum returns the sum at the time the snapshot was taken.
-func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
-
-// Time panics.
-func (*TimerSnapshot) Time(func()) {
- panic("Time called on a TimerSnapshot")
-}
-
-// Update panics.
-func (*TimerSnapshot) Update(time.Duration) {
- panic("Update called on a TimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*TimerSnapshot) UpdateSince(time.Time) {
- panic("UpdateSince called on a TimerSnapshot")
-}
+func (t *timerSnapshot) Sum() int64 { return t.histogram.Sum() }
// Variance returns the variance of the values at the time the snapshot was
// taken.
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
+func (t *timerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git ethereum/go-ethereum/metrics/timer_test.go taikoxyz/taiko-geth/metrics/timer_test.go
index 903e8e8d496e7cbea9ec3a1a28dd9c659c4c88e9..f10de16c9c23d534891bf1ea97078c819bba67f3 100644
--- ethereum/go-ethereum/metrics/timer_test.go
+++ taikoxyz/taiko-geth/metrics/timer_test.go
@@ -18,7 +18,7 @@
func TestGetOrRegisterTimer(t *testing.T) {
r := NewRegistry()
NewRegisteredTimer("foo", r).Update(47)
- if tm := GetOrRegisterTimer("foo", r); tm.Count() != 1 {
+ if tm := GetOrRegisterTimer("foo", r).Snapshot(); tm.Count() != 1 {
t.Fatal(tm)
}
}
@@ -27,7 +27,7 @@ func TestTimerExtremes(t *testing.T) {
tm := NewTimer()
tm.Update(math.MaxInt64)
tm.Update(0)
- if stdDev := tm.StdDev(); stdDev != 4.611686018427388e+18 {
+ if stdDev := tm.Snapshot().StdDev(); stdDev != 4.611686018427388e+18 {
t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev)
}
}
@@ -56,7 +56,7 @@ actualTime = time.Since(testStart)
})
var (
drift = time.Millisecond * 2
- measured = time.Duration(tm.Max())
+ measured = time.Duration(tm.Snapshot().Max())
ceil = actualTime + drift
floor = actualTime - drift
)
@@ -66,7 +66,7 @@ }
}
func TestTimerZero(t *testing.T) {
- tm := NewTimer()
+ tm := NewTimer().Snapshot()
if count := tm.Count(); count != 0 {
t.Errorf("tm.Count(): 0 != %v\n", count)
}
@@ -110,5 +110,5 @@ func ExampleGetOrRegisterTimer() {
m := "account.create.latency"
t := GetOrRegisterTimer(m, nil)
t.Update(47)
- fmt.Println(t.Max()) // Output: 47
+ fmt.Println(t.Snapshot().Max()) // Output: 47
}
diff --git ethereum/go-ethereum/metrics/writer.go taikoxyz/taiko-geth/metrics/writer.go
index 82434e9d1d62a93a5363175fc0910e8d5c7c3224..098da45c27b2acbe0eeb0f92d7acbd0f43ca1a3f 100644
--- ethereum/go-ethereum/metrics/writer.go
+++ taikoxyz/taiko-geth/metrics/writer.go
@@ -29,16 +29,19 @@ for _, namedMetric := range namedMetrics {
switch metric := namedMetric.m.(type) {
case Counter:
fmt.Fprintf(w, "counter %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", metric.Count())
+ fmt.Fprintf(w, " count: %9d\n", metric.Snapshot().Count())
case CounterFloat64:
fmt.Fprintf(w, "counter %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %f\n", metric.Count())
+ fmt.Fprintf(w, " count: %f\n", metric.Snapshot().Count())
case Gauge:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %9d\n", metric.Value())
+ fmt.Fprintf(w, " value: %9d\n", metric.Snapshot().Value())
case GaugeFloat64:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %f\n", metric.Value())
+ fmt.Fprintf(w, " value: %f\n", metric.Snapshot().Value())
+ case GaugeInfo:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %s\n", metric.Snapshot().Value().String())
case Healthcheck:
metric.Check()
fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
diff --git ethereum/go-ethereum/miner/stress/clique/main.go taikoxyz/taiko-geth/miner/stress/clique/main.go
index 53ff2450c55be859ed1dbb79be8011c3239b228e..7b29e63dfc699f5e19b80503cef48e4f75f4a84d 100644
--- ethereum/go-ethereum/miner/stress/clique/main.go
+++ taikoxyz/taiko-geth/miner/stress/clique/main.go
@@ -30,7 +30,6 @@ "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/fdlimit"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -133,7 +132,7 @@ tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000), nil), types.HomesteadSigner{}, faucets[index])
if err != nil {
panic(err)
}
- if err := backend.TxPool().Add([]*txpool.Transaction{{Tx: tx}}, true, false); err != nil {
+ if err := backend.TxPool().Add([]*types.Transaction{tx}, true, false); err != nil {
panic(err)
}
nonces[index]++
@@ -148,7 +147,7 @@
// makeGenesis creates a custom Clique genesis block based on some pre-defined
// signer and faucet accounts.
func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core.Genesis {
- // Create a Clique network based off of the Seplia config
+ // Create a Clique network based off of the Sepolia config
genesis := core.DefaultSepoliaGenesisBlock()
genesis.GasLimit = 25000000
diff --git ethereum/go-ethereum/node/defaults.go taikoxyz/taiko-geth/node/defaults.go
index d8f718121e80287e30cc2ce94ce38605cb726e61..42d9d4cde0fc9830cd33252926125eb5c48ae7da 100644
--- ethereum/go-ethereum/node/defaults.go
+++ taikoxyz/taiko-geth/node/defaults.go
@@ -36,6 +36,13 @@ DefaultAuthHost = "localhost" // Default host interface for the authenticated apis
DefaultAuthPort = 8551 // Default port for the authenticated apis
)
+const (
+ // Engine API batch limits: these are not configurable by users, and should cover the
+ // needs of all CLs.
+ engineAPIBatchItemLimit = 2000
+ engineAPIBatchResponseSizeLimit = 250 * 1000 * 1000
+)
+
var (
DefaultAuthCors = []string{"localhost"} // Default cors domain for the authenticated apis
DefaultAuthVhosts = []string{"localhost"} // Default virtual hosts for the authenticated apis
diff --git ethereum/go-ethereum/node/node.go taikoxyz/taiko-geth/node/node.go
index da41169c52b8676da59fb72ad54c7449680c1682..41c9971fe8e6de8fccd9d0a2a674957e217e81dd 100644
--- ethereum/go-ethereum/node/node.go
+++ taikoxyz/taiko-geth/node/node.go
@@ -449,8 +449,11 @@ server := n.httpAuth
if err := server.setListenAddr(n.config.AuthAddr, port); err != nil {
return err
}
- sharedConfig := rpcConfig
- sharedConfig.jwtSecret = secret
+ sharedConfig := rpcEndpointConfig{
+ jwtSecret: secret,
+ batchItemLimit: engineAPIBatchItemLimit,
+ batchResponseSizeLimit: engineAPIBatchResponseSizeLimit,
+ }
if err := server.enableRPC(allAPIs, httpConfig{
CorsAllowedOrigins: DefaultAuthCors,
Vhosts: n.config.AuthVirtualHosts,
diff --git ethereum/go-ethereum/p2p/discover/v4wire/v4wire.go taikoxyz/taiko-geth/p2p/discover/v4wire/v4wire.go
index 3935068cd9dba811cab1496be9fb249eaca2a022..9c59359fb2c2e005503c3462f2305d58d260f24c 100644
--- ethereum/go-ethereum/p2p/discover/v4wire/v4wire.go
+++ taikoxyz/taiko-geth/p2p/discover/v4wire/v4wire.go
@@ -238,6 +238,8 @@ req = new(ENRResponse)
default:
return nil, fromKey, hash, fmt.Errorf("unknown type: %d", ptype)
}
+ // Here we use NewStream to allow for additional data after the first
+ // RLP object (forward-compatibility).
s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0)
err = s.Decode(req)
return req, fromKey, hash, err
diff --git ethereum/go-ethereum/rlp/decode.go taikoxyz/taiko-geth/rlp/decode.go
index c9b50e8c18795ee3da5634d6184f36ffaafedaa8..9b17d2d810846b4c5473644bfda167bcca6593db 100644
--- ethereum/go-ethereum/rlp/decode.go
+++ taikoxyz/taiko-geth/rlp/decode.go
@@ -90,7 +90,7 @@
// DecodeBytes parses RLP data from b into val. Please see package-level documentation for
// the decoding rules. The input must contain exactly one value and no trailing data.
func DecodeBytes(b []byte, val interface{}) error {
- r := bytes.NewReader(b)
+ r := (*sliceReader)(&b)
stream := streamPool.Get().(*Stream)
defer streamPool.Put(stream)
@@ -99,7 +99,7 @@ stream.Reset(r, uint64(len(b)))
if err := stream.Decode(val); err != nil {
return err
}
- if r.Len() > 0 {
+ if len(b) > 0 {
return ErrMoreThanOneValue
}
return nil
@@ -1182,3 +1182,23 @@ return false, 0
}
return true, s.stack[len(s.stack)-1]
}
+
+type sliceReader []byte
+
+func (sr *sliceReader) Read(b []byte) (int, error) {
+ if len(*sr) == 0 {
+ return 0, io.EOF
+ }
+ n := copy(b, *sr)
+ *sr = (*sr)[n:]
+ return n, nil
+}
+
+func (sr *sliceReader) ReadByte() (byte, error) {
+ if len(*sr) == 0 {
+ return 0, io.EOF
+ }
+ b := (*sr)[0]
+ *sr = (*sr)[1:]
+ return b, nil
+}
diff --git ethereum/go-ethereum/rlp/rlpgen/main.go taikoxyz/taiko-geth/rlp/rlpgen/main.go
index 25d4393cc6561db2b6c750da21f03092181a9ac6..b3a74b9df13f817e33e6e9940e4e731fe1a0e4b6 100644
--- ethereum/go-ethereum/rlp/rlpgen/main.go
+++ taikoxyz/taiko-geth/rlp/rlpgen/main.go
@@ -73,9 +73,8 @@ // process generates the Go code.
func (cfg *Config) process() (code []byte, err error) {
// Load packages.
pcfg := &packages.Config{
- Mode: packages.NeedName | packages.NeedTypes | packages.NeedImports | packages.NeedDeps,
- Dir: cfg.Dir,
- BuildFlags: []string{"-tags", "norlpgen"},
+ Mode: packages.NeedName | packages.NeedTypes,
+ Dir: cfg.Dir,
}
ps, err := packages.Load(pcfg, pathOfPackageRLP, ".")
if err != nil {
@@ -117,8 +116,6 @@ // Add build comments.
// This is done here to avoid processing these lines with gofmt.
var header bytes.Buffer
fmt.Fprint(&header, "// Code generated by rlpgen. DO NOT EDIT.\n\n")
- fmt.Fprint(&header, "//go:build !norlpgen\n")
- fmt.Fprint(&header, "// +build !norlpgen\n\n")
return append(header.Bytes(), code...), nil
}
diff --git ethereum/go-ethereum/rpc/client_opt.go taikoxyz/taiko-geth/rpc/client_opt.go
index 5bef08cca8410bf22cf949886b912acf755f6fda..3fa045a9b9f39076d0ed62f845eb0e3864bba241 100644
--- ethereum/go-ethereum/rpc/client_opt.go
+++ taikoxyz/taiko-geth/rpc/client_opt.go
@@ -34,7 +34,8 @@ httpHeaders http.Header
httpAuth HTTPAuth
// WebSocket options
- wsDialer *websocket.Dialer
+ wsDialer *websocket.Dialer
+ wsMessageSizeLimit *int64 // wsMessageSizeLimit nil = default, 0 = no limit
// RPC handler options
idgen func() ID
@@ -63,6 +64,14 @@ // WithWebsocketDialer configures the websocket.Dialer used by the RPC client.
func WithWebsocketDialer(dialer websocket.Dialer) ClientOption {
return optionFunc(func(cfg *clientConfig) {
cfg.wsDialer = &dialer
+ })
+}
+
+// WithWebsocketMessageSizeLimit configures the websocket message size limit used by the RPC
+// client. Passing a limit of 0 means no limit.
+func WithWebsocketMessageSizeLimit(messageSizeLimit int64) ClientOption {
+ return optionFunc(func(cfg *clientConfig) {
+ cfg.wsMessageSizeLimit = &messageSizeLimit
})
}
diff --git ethereum/go-ethereum/rpc/ipc_windows.go taikoxyz/taiko-geth/rpc/ipc_windows.go
index adb1826f0c802f2657a0707857b52fe13b84c616..efec38cf37299b9c3092daadae26da96d8fb323a 100644
--- ethereum/go-ethereum/rpc/ipc_windows.go
+++ taikoxyz/taiko-geth/rpc/ipc_windows.go
@@ -24,7 +24,7 @@ "context"
"net"
"time"
- "gopkg.in/natefinch/npipe.v2"
+ "github.com/Microsoft/go-winio"
)
// This is used if the dialing context has no deadline. It is much smaller than the
@@ -33,17 +33,12 @@ const defaultPipeDialTimeout = 2 * time.Second
// ipcListen will create a named pipe on the given endpoint.
func ipcListen(endpoint string) (net.Listener, error) {
- return npipe.Listen(endpoint)
+ return winio.ListenPipe(endpoint, nil)
}
// newIPCConnection will connect to a named pipe with the given endpoint as name.
func newIPCConnection(ctx context.Context, endpoint string) (net.Conn, error) {
- timeout := defaultPipeDialTimeout
- if deadline, ok := ctx.Deadline(); ok {
- timeout = deadline.Sub(time.Now())
- if timeout < 0 {
- timeout = 0
- }
- }
- return npipe.DialTimeout(endpoint, timeout)
+ ctx, cancel := context.WithTimeout(ctx, defaultPipeDialTimeout)
+ defer cancel()
+ return winio.DialPipeContext(ctx, endpoint)
}
diff --git ethereum/go-ethereum/rpc/server_test.go taikoxyz/taiko-geth/rpc/server_test.go
index 5d3929dfdc692652ea577a5d56e8f8f804967ac8..9d1c7fb5f0fe730ff946698707647913a5ed93ac 100644
--- ethereum/go-ethereum/rpc/server_test.go
+++ taikoxyz/taiko-geth/rpc/server_test.go
@@ -32,7 +32,8 @@ func TestServerRegisterName(t *testing.T) {
server := NewServer()
service := new(testService)
- if err := server.RegisterName("test", service); err != nil {
+ svcName := "test"
+ if err := server.RegisterName(svcName, service); err != nil {
t.Fatalf("%v", err)
}
@@ -40,12 +41,12 @@ if len(server.services.services) != 2 {
t.Fatalf("Expected 2 service entries, got %d", len(server.services.services))
}
- svc, ok := server.services.services["test"]
+ svc, ok := server.services.services[svcName]
if !ok {
- t.Fatalf("Expected service calc to be registered")
+ t.Fatalf("Expected service %s to be registered", svcName)
}
- wantCallbacks := 13
+ wantCallbacks := 14
if len(svc.callbacks) != wantCallbacks {
t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks))
}
diff --git ethereum/go-ethereum/rpc/testservice_test.go taikoxyz/taiko-geth/rpc/testservice_test.go
index eab67f1dd5d8a7eeff07a601c0e8617dcd863d4f..7d873af6670e3e7696394bbeea7c4c8440f44bc0 100644
--- ethereum/go-ethereum/rpc/testservice_test.go
+++ taikoxyz/taiko-geth/rpc/testservice_test.go
@@ -90,6 +90,10 @@ func (s *testService) EchoWithCtx(ctx context.Context, str string, i int, args *echoArgs) echoResult {
return echoResult{str, i, args}
}
+func (s *testService) Repeat(msg string, i int) string {
+ return strings.Repeat(msg, i)
+}
+
func (s *testService) PeerInfo(ctx context.Context) PeerInfo {
return PeerInfoFromContext(ctx)
}
diff --git ethereum/go-ethereum/rpc/websocket.go taikoxyz/taiko-geth/rpc/websocket.go
index b1213fdfa663eb2829e57bf6ec3e2880f0c19d47..538e53a31b7c363af28b6ec862ed163cbea05ed5 100644
--- ethereum/go-ethereum/rpc/websocket.go
+++ taikoxyz/taiko-geth/rpc/websocket.go
@@ -38,7 +38,7 @@ wsWriteBuffer = 1024
wsPingInterval = 30 * time.Second
wsPingWriteTimeout = 5 * time.Second
wsPongTimeout = 30 * time.Second
- wsMessageSizeLimit = 32 * 1024 * 1024
+ wsDefaultReadLimit = 32 * 1024 * 1024
)
var wsBufferPool = new(sync.Pool)
@@ -60,7 +60,7 @@ if err != nil {
log.Debug("WebSocket upgrade failed", "err", err)
return
}
- codec := newWebsocketCodec(conn, r.Host, r.Header)
+ codec := newWebsocketCodec(conn, r.Host, r.Header, wsDefaultReadLimit)
s.ServeCodec(codec, 0)
})
}
@@ -251,7 +251,11 @@ hErr.status = resp.Status
}
return nil, hErr
}
- return newWebsocketCodec(conn, dialURL, header), nil
+ messageSizeLimit := int64(wsDefaultReadLimit)
+ if cfg.wsMessageSizeLimit != nil && *cfg.wsMessageSizeLimit >= 0 {
+ messageSizeLimit = *cfg.wsMessageSizeLimit
+ }
+ return newWebsocketCodec(conn, dialURL, header, messageSizeLimit), nil
}
return connect, nil
}
@@ -278,24 +282,21 @@ *jsonCodec
conn *websocket.Conn
info PeerInfo
- wg sync.WaitGroup
- pingReset chan struct{}
+ wg sync.WaitGroup
+ pingReset chan struct{}
+ pongReceived chan struct{}
}
-func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) ServerCodec {
- conn.SetReadLimit(wsMessageSizeLimit)
- conn.SetPongHandler(func(appData string) error {
- conn.SetReadDeadline(time.Time{})
- return nil
- })
-
+func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header, readLimit int64) ServerCodec {
+ conn.SetReadLimit(readLimit)
encode := func(v interface{}, isErrorResponse bool) error {
return conn.WriteJSON(v)
}
wc := &websocketCodec{
- jsonCodec: NewFuncCodec(conn, encode, conn.ReadJSON).(*jsonCodec),
- conn: conn,
- pingReset: make(chan struct{}, 1),
+ jsonCodec: NewFuncCodec(conn, encode, conn.ReadJSON).(*jsonCodec),
+ conn: conn,
+ pingReset: make(chan struct{}, 1),
+ pongReceived: make(chan struct{}),
info: PeerInfo{
Transport: "ws",
RemoteAddr: conn.RemoteAddr().String(),
@@ -306,6 +307,13 @@ wc.info.HTTP.Host = host
wc.info.HTTP.Origin = req.Get("Origin")
wc.info.HTTP.UserAgent = req.Get("User-Agent")
// Start pinger.
+ conn.SetPongHandler(func(appData string) error {
+ select {
+ case wc.pongReceived <- struct{}{}:
+ case <-wc.closed():
+ }
+ return nil
+ })
wc.wg.Add(1)
go wc.pingLoop()
return wc
@@ -334,26 +342,31 @@ }
// pingLoop sends periodic ping frames when the connection is idle.
func (wc *websocketCodec) pingLoop() {
- var timer = time.NewTimer(wsPingInterval)
+ var pingTimer = time.NewTimer(wsPingInterval)
defer wc.wg.Done()
- defer timer.Stop()
+ defer pingTimer.Stop()
for {
select {
case <-wc.closed():
return
+
case <-wc.pingReset:
- if !timer.Stop() {
- <-timer.C
+ if !pingTimer.Stop() {
+ <-pingTimer.C
}
- timer.Reset(wsPingInterval)
- case <-timer.C:
+ pingTimer.Reset(wsPingInterval)
+
+ case <-pingTimer.C:
wc.jsonCodec.encMu.Lock()
wc.conn.SetWriteDeadline(time.Now().Add(wsPingWriteTimeout))
wc.conn.WriteMessage(websocket.PingMessage, nil)
wc.conn.SetReadDeadline(time.Now().Add(wsPongTimeout))
wc.jsonCodec.encMu.Unlock()
- timer.Reset(wsPingInterval)
+ pingTimer.Reset(wsPingInterval)
+
+ case <-wc.pongReceived:
+ wc.conn.SetReadDeadline(time.Time{})
}
}
}
diff --git ethereum/go-ethereum/rpc/websocket_test.go taikoxyz/taiko-geth/rpc/websocket_test.go
index fb9357605b8b9ad34eae51a8876b7e5b3c6c9214..e4ac5c3fad3f0cfde8b59b261fc842167fcb7a13 100644
--- ethereum/go-ethereum/rpc/websocket_test.go
+++ taikoxyz/taiko-geth/rpc/websocket_test.go
@@ -113,6 +113,66 @@ t.Fatal("no error for too large call")
}
}
+// This test checks whether the wsMessageSizeLimit option is obeyed.
+func TestWebsocketLargeRead(t *testing.T) {
+ t.Parallel()
+
+ var (
+ srv = newTestServer()
+ httpsrv = httptest.NewServer(srv.WebsocketHandler([]string{"*"}))
+ wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:")
+ )
+ defer srv.Stop()
+ defer httpsrv.Close()
+
+ testLimit := func(limit *int64) {
+ opts := []ClientOption{}
+ expLimit := int64(wsDefaultReadLimit)
+ if limit != nil && *limit >= 0 {
+ opts = append(opts, WithWebsocketMessageSizeLimit(*limit))
+ if *limit > 0 {
+ expLimit = *limit // 0 means infinite
+ }
+ }
+ client, err := DialOptions(context.Background(), wsURL, opts...)
+ if err != nil {
+ t.Fatalf("can't dial: %v", err)
+ }
+ defer client.Close()
+ // Remove some bytes for json encoding overhead.
+ underLimit := int(expLimit - 128)
+ overLimit := expLimit + 1
+ if expLimit == wsDefaultReadLimit {
+ // No point trying the full 32MB in tests. Just sanity-check that
+ // it's not obviously limited.
+ underLimit = 1024
+ overLimit = -1
+ }
+ var res string
+ // Check under limit
+ if err = client.Call(&res, "test_repeat", "A", underLimit); err != nil {
+ t.Fatalf("unexpected error with limit %d: %v", expLimit, err)
+ }
+ if len(res) != underLimit || strings.Count(res, "A") != underLimit {
+ t.Fatal("incorrect data")
+ }
+ // Check over limit
+ if overLimit > 0 {
+ err = client.Call(&res, "test_repeat", "A", expLimit+1)
+ if err == nil || err != websocket.ErrReadLimit {
+ t.Fatalf("wrong error with limit %d: %v expecting %v", expLimit, err, websocket.ErrReadLimit)
+ }
+ }
+ }
+ ptr := func(v int64) *int64 { return &v }
+
+ testLimit(ptr(-1)) // Should be ignored (use default)
+ testLimit(ptr(0)) // Should be ignored (use default)
+ testLimit(nil) // Should be ignored (use default)
+ testLimit(ptr(200))
+ testLimit(ptr(wsDefaultReadLimit * 2))
+}
+
func TestWebsocketPeerInfo(t *testing.T) {
var (
s = newTestServer()
@@ -206,7 +266,7 @@ )
defer srv.Stop()
defer httpsrv.Close()
- respLength := wsMessageSizeLimit - 50
+ respLength := wsDefaultReadLimit - 50
srv.RegisterName("test", largeRespService{respLength})
c, err := DialWebsocket(context.Background(), wsURL, "")
diff --git ethereum/go-ethereum/signer/core/api_test.go taikoxyz/taiko-geth/signer/core/api_test.go
index 9bb55bddca3175a6263194b4c6aa9eff88a477b1..5a9de161b3acfda6377bf36ac6424e45120f66ab 100644
--- ethereum/go-ethereum/signer/core/api_test.go
+++ taikoxyz/taiko-geth/signer/core/api_test.go
@@ -282,7 +282,7 @@ if err != nil {
t.Fatal(err)
}
parsedTx := &types.Transaction{}
- rlp.Decode(bytes.NewReader(res.Raw), parsedTx)
+ rlp.DecodeBytes(res.Raw, parsedTx)
//The tx should NOT be modified by the UI
if parsedTx.Value().Cmp(tx.Value.ToInt()) != 0 {
@@ -308,7 +308,7 @@ if err != nil {
t.Fatal(err)
}
parsedTx2 := &types.Transaction{}
- rlp.Decode(bytes.NewReader(res.Raw), parsedTx2)
+ rlp.DecodeBytes(res.Raw, parsedTx2)
//The tx should be modified by the UI
if parsedTx2.Value().Cmp(tx.Value.ToInt()) != 0 {
diff --git ethereum/go-ethereum/tests/block_test.go taikoxyz/taiko-geth/tests/block_test.go
index 2405da1cc7335aa76294322e8cc66e1c77aa1944..5764ae33e474639ee2234b62d1899d3c7fc10527 100644
--- ethereum/go-ethereum/tests/block_test.go
+++ taikoxyz/taiko-geth/tests/block_test.go
@@ -18,11 +18,12 @@ package tests
import (
"testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
)
func TestBlockchain(t *testing.T) {
- t.Parallel()
-
bt := new(testMatcher)
// General state tests are 'exported' as blockchain tests, but we can run them natively.
// For speedier CI-runs, the line below can be uncommented, so those are skipped.
@@ -48,14 +49,40 @@ // using 4.6 TGas
bt.skipLoad(`.*randomStatetest94.json.*`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
- if err := bt.checkFailure(t, test.Run(false, nil)); err != nil {
- t.Errorf("test without snapshotter failed: %v", err)
- }
- if err := bt.checkFailure(t, test.Run(true, nil)); err != nil {
- t.Errorf("test with snapshotter failed: %v", err)
- }
+ execBlockTest(t, bt, test)
})
// There is also a LegacyTests folder, containing blockchain tests generated
// prior to Istanbul. However, they are all derived from GeneralStateTests,
// which run natively, so there's no reason to run them here.
}
+
+// TestExecutionSpec runs the test fixtures from execution-spec-tests.
+func TestExecutionSpec(t *testing.T) {
+ if !common.FileExist(executionSpecDir) {
+ t.Skipf("directory %s does not exist", executionSpecDir)
+ }
+ bt := new(testMatcher)
+
+ bt.walk(t, executionSpecDir, func(t *testing.T, name string, test *BlockTest) {
+ execBlockTest(t, bt, test)
+ })
+}
+
+func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
+ if err := bt.checkFailure(t, test.Run(false, rawdb.HashScheme, nil)); err != nil {
+ t.Errorf("test in hash mode without snapshotter failed: %v", err)
+ return
+ }
+ if err := bt.checkFailure(t, test.Run(true, rawdb.HashScheme, nil)); err != nil {
+ t.Errorf("test in hash mode with snapshotter failed: %v", err)
+ return
+ }
+ if err := bt.checkFailure(t, test.Run(false, rawdb.PathScheme, nil)); err != nil {
+ t.Errorf("test in path mode without snapshotter failed: %v", err)
+ return
+ }
+ if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil)); err != nil {
+ t.Errorf("test in path mode with snapshotter failed: %v", err)
+ return
+ }
+}
diff --git ethereum/go-ethereum/tests/block_test_util.go taikoxyz/taiko-geth/tests/block_test_util.go
index d3e525a387e3ba632cf2c557ba0ff065dd539cb9..ad1d34fb2bc731448ba33d496c0ae234910cfac0 100644
--- ethereum/go-ethereum/tests/block_test_util.go
+++ taikoxyz/taiko-geth/tests/block_test_util.go
@@ -38,6 +38,9 @@ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)
// A BlockTest checks handling of entire blocks.
@@ -70,24 +73,27 @@
//go:generate go run github.com/fjl/gencodec -type btHeader -field-override btHeaderMarshaling -out gen_btheader.go
type btHeader struct {
- Bloom types.Bloom
- Coinbase common.Address
- MixHash common.Hash
- Nonce types.BlockNonce
- Number *big.Int
- Hash common.Hash
- ParentHash common.Hash
- ReceiptTrie common.Hash
- StateRoot common.Hash
- TransactionsTrie common.Hash
- UncleHash common.Hash
- ExtraData []byte
- Difficulty *big.Int
- GasLimit uint64
- GasUsed uint64
- Timestamp uint64
- BaseFeePerGas *big.Int
- WithdrawalsRoot *common.Hash
+ Bloom types.Bloom
+ Coinbase common.Address
+ MixHash common.Hash
+ Nonce types.BlockNonce
+ Number *big.Int
+ Hash common.Hash
+ ParentHash common.Hash
+ ReceiptTrie common.Hash
+ StateRoot common.Hash
+ TransactionsTrie common.Hash
+ UncleHash common.Hash
+ ExtraData []byte
+ Difficulty *big.Int
+ GasLimit uint64
+ GasUsed uint64
+ Timestamp uint64
+ BaseFeePerGas *big.Int
+ WithdrawalsRoot *common.Hash
+ BlobGasUsed *uint64
+ ExcessBlobGas *uint64
+ ParentBeaconBlockRoot *common.Hash
}
type btHeaderMarshaling struct {
@@ -98,18 +104,34 @@ GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64
Timestamp math.HexOrDecimal64
BaseFeePerGas *math.HexOrDecimal256
+ BlobGasUsed *math.HexOrDecimal64
+ ExcessBlobGas *math.HexOrDecimal64
}
-func (t *BlockTest) Run(snapshotter bool, tracer vm.EVMLogger) error {
+func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger) error {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
-
// import pre accounts & construct test genesis block & state root
- db := rawdb.NewMemoryDatabase()
+ var (
+ db = rawdb.NewMemoryDatabase()
+ tconf = &trie.Config{}
+ )
+ if scheme == rawdb.PathScheme {
+ tconf.PathDB = pathdb.Defaults
+ } else {
+ tconf.HashDB = hashdb.Defaults
+ }
+ // Commit genesis state
gspec := t.genesis(config)
- gblock := gspec.MustCommit(db)
+ triedb := trie.NewDatabase(db, tconf)
+ gblock, err := gspec.Commit(db, triedb)
+ if err != nil {
+ return err
+ }
+ triedb.Close() // close the db to prevent memory leak
+
if gblock.Hash() != t.json.Genesis.Hash {
return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
}
@@ -119,7 +141,7 @@ }
// Wrap the original engine within the beacon-engine
engine := beacon.New(ethash.NewFaker())
- cache := &core.CacheConfig{TrieCleanLimit: 0}
+ cache := &core.CacheConfig{TrieCleanLimit: 0, StateScheme: scheme}
if snapshotter {
cache.SnapshotLimit = 1
cache.SnapshotWait = true
@@ -158,18 +180,20 @@ }
func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
return &core.Genesis{
- Config: config,
- Nonce: t.json.Genesis.Nonce.Uint64(),
- Timestamp: t.json.Genesis.Timestamp,
- ParentHash: t.json.Genesis.ParentHash,
- ExtraData: t.json.Genesis.ExtraData,
- GasLimit: t.json.Genesis.GasLimit,
- GasUsed: t.json.Genesis.GasUsed,
- Difficulty: t.json.Genesis.Difficulty,
- Mixhash: t.json.Genesis.MixHash,
- Coinbase: t.json.Genesis.Coinbase,
- Alloc: t.json.Pre,
- BaseFee: t.json.Genesis.BaseFeePerGas,
+ Config: config,
+ Nonce: t.json.Genesis.Nonce.Uint64(),
+ Timestamp: t.json.Genesis.Timestamp,
+ ParentHash: t.json.Genesis.ParentHash,
+ ExtraData: t.json.Genesis.ExtraData,
+ GasLimit: t.json.Genesis.GasLimit,
+ GasUsed: t.json.Genesis.GasUsed,
+ Difficulty: t.json.Genesis.Difficulty,
+ Mixhash: t.json.Genesis.MixHash,
+ Coinbase: t.json.Genesis.Coinbase,
+ Alloc: t.json.Pre,
+ BaseFee: t.json.Genesis.BaseFeePerGas,
+ BlobGasUsed: t.json.Genesis.BlobGasUsed,
+ ExcessBlobGas: t.json.Genesis.ExcessBlobGas,
}
}
@@ -277,6 +301,15 @@ return fmt.Errorf("baseFeePerGas: want: %v have: %v", h.BaseFeePerGas, h2.BaseFee)
}
if !reflect.DeepEqual(h.WithdrawalsRoot, h2.WithdrawalsHash) {
return fmt.Errorf("withdrawalsRoot: want: %v have: %v", h.WithdrawalsRoot, h2.WithdrawalsHash)
+ }
+ if !reflect.DeepEqual(h.BlobGasUsed, h2.BlobGasUsed) {
+ return fmt.Errorf("blobGasUsed: want: %v have: %v", h.BlobGasUsed, h2.BlobGasUsed)
+ }
+ if !reflect.DeepEqual(h.ExcessBlobGas, h2.ExcessBlobGas) {
+ return fmt.Errorf("excessBlobGas: want: %v have: %v", h.ExcessBlobGas, h2.ExcessBlobGas)
+ }
+ if !reflect.DeepEqual(h.ParentBeaconBlockRoot, h2.ParentBeaconRoot) {
+ return fmt.Errorf("parentBeaconBlockRoot: want: %v have: %v", h.ParentBeaconBlockRoot, h2.ParentBeaconRoot)
}
return nil
}
diff --git ethereum/go-ethereum/tests/fuzzers/les/les-fuzzer.go taikoxyz/taiko-geth/tests/fuzzers/les/les-fuzzer.go
index c62253a72622a9d8bbea14446f324f4ecbceb35e..c29bb2ef12283083ac84ad354ab991c15c1f9d99 100644
--- ethereum/go-ethereum/tests/fuzzers/les/les-fuzzer.go
+++ taikoxyz/taiko-geth/tests/fuzzers/les/les-fuzzer.go
@@ -88,8 +88,8 @@ return
}
func makeTries() (chtTrie *trie.Trie, bloomTrie *trie.Trie, chtKeys, bloomKeys [][]byte) {
- chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
- bloomTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), trie.HashDefaults))
+ bloomTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), trie.HashDefaults))
for i := 0; i < testChainLen; i++ {
// The element in CHT is <big-endian block number> -> <block hash>
key := make([]byte, 8)
diff --git ethereum/go-ethereum/tests/fuzzers/rangeproof/rangeproof-fuzzer.go taikoxyz/taiko-geth/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
index aa81e5c9d9b13c128ceeb55991206e32b82232b7..c9d781553673f257ffc18dcb46e17d2aa73b52e9 100644
--- ethereum/go-ethereum/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
+++ taikoxyz/taiko-geth/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
@@ -56,7 +56,7 @@ return x
}
func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
- trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
size := f.readInt()
// Fill it with some fluff
@@ -128,7 +128,7 @@ }
if len(keys) == 0 {
return 0
}
- var first, last = keys[0], keys[len(keys)-1]
+ var first = keys[0]
testcase %= 6
switch testcase {
case 0:
@@ -165,7 +165,7 @@ break
}
ok = 1
//nodes, subtrie
- hasMore, err := trie.VerifyRangeProof(tr.Hash(), first, last, keys, vals, proof)
+ hasMore, err := trie.VerifyRangeProof(tr.Hash(), first, keys, vals, proof)
if err != nil {
if hasMore {
panic("err != nil && hasMore == true")
diff --git ethereum/go-ethereum/tests/fuzzers/stacktrie/trie_fuzzer.go taikoxyz/taiko-geth/tests/fuzzers/stacktrie/trie_fuzzer.go
index 391bdf300b726b7d469e97522e6d50f3b5299266..20b8ca24b3d26294bd657b7f507c99073c2376ae 100644
--- ethereum/go-ethereum/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ taikoxyz/taiko-geth/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -136,12 +136,12 @@ func (f *fuzzer) fuzz() int {
// This spongeDb is used to check the sequence of disk-db-writes
var (
spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbA = trie.NewDatabase(rawdb.NewDatabase(spongeA))
+ dbA = trie.NewDatabase(rawdb.NewDatabase(spongeA), nil)
trieA = trie.NewEmpty(dbA)
spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB))
- trieB = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(spongeB, owner, path, hash, blob, dbB.Scheme())
+ dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB), nil)
+ trieB = trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme())
})
vals []kv
useful bool
@@ -205,12 +205,9 @@
// Ensure all the nodes are persisted correctly
var (
nodeset = make(map[string][]byte) // path -> blob
- trieC = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ trieC = trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
if crypto.Keccak256Hash(blob) != hash {
panic("invalid node blob")
- }
- if owner != (common.Hash{}) {
- panic("invalid node owner")
}
nodeset[string(path)] = common.CopyBytes(blob)
})
diff --git ethereum/go-ethereum/tests/fuzzers/trie/trie-fuzzer.go taikoxyz/taiko-geth/tests/fuzzers/trie/trie-fuzzer.go
index fe9bf3d0fd3ded2ff38a268db86006270b0cf4cf..687f5efb1ceafa78f9ae40dfe3393f3a8d442662 100644
--- ethereum/go-ethereum/tests/fuzzers/trie/trie-fuzzer.go
+++ taikoxyz/taiko-geth/tests/fuzzers/trie/trie-fuzzer.go
@@ -143,7 +143,7 @@ }
func runRandTest(rt randTest) error {
var (
- triedb = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ triedb = trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)
tr = trie.NewEmpty(triedb)
origin = types.EmptyRootHash
values = make(map[string]string) // tracks content of the trie
diff --git ethereum/go-ethereum/tests/fuzzers/txfetcher/txfetcher_fuzzer.go taikoxyz/taiko-geth/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
index 56b6b1e64eaa8625ce3c92e5f48d644dfe954ea5..8b501645b6631da52fc851acde8f3d5aa7754866 100644
--- ethereum/go-ethereum/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
+++ taikoxyz/taiko-geth/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
@@ -25,7 +25,6 @@ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
- "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/fetcher"
)
@@ -80,10 +79,11 @@ rand := rand.New(rand.NewSource(0x3a29)) // Same used in package tests!!!
f := fetcher.NewTxFetcherForTests(
func(common.Hash) bool { return false },
- func(txs []*txpool.Transaction) []error {
+ func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
+ nil,
clock, rand,
)
f.Start()
@@ -117,6 +117,8 @@
var (
announceIdxs = make([]int, announce)
announces = make([]common.Hash, announce)
+ types = make([]byte, announce)
+ sizes = make([]uint32, announce)
)
for i := 0; i < len(announces); i++ {
annBuf := make([]byte, 2)
@@ -125,11 +127,13 @@ return 0
}
announceIdxs[i] = (int(annBuf[0])*256 + int(annBuf[1])) % len(txs)
announces[i] = txs[announceIdxs[i]].Hash()
+ types[i] = txs[announceIdxs[i]].Type()
+ sizes[i] = uint32(txs[announceIdxs[i]].Size())
}
if verbose {
fmt.Println("Notify", peer, announceIdxs)
}
- if err := f.Notify(peer, announces); err != nil {
+ if err := f.Notify(peer, types, sizes, announces); err != nil {
panic(err)
}
diff --git ethereum/go-ethereum/tests/init.go taikoxyz/taiko-geth/tests/init.go
index a04e227dc7afe7e2ac10d79d9474f1c4d49bece4..99b7e4d33310197abddec8960d869f61818bb20b 100644
--- ethereum/go-ethereum/tests/init.go
+++ taikoxyz/taiko-geth/tests/init.go
@@ -318,6 +318,25 @@ TerminalTotalDifficulty: big.NewInt(0),
ShanghaiTime: u64(0),
CancunTime: u64(0),
},
+ "ShanghaiToCancunAtTime15k": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ ShanghaiTime: u64(0),
+ CancunTime: u64(15_000),
+ },
}
// AvailableForks returns the set of defined fork names
diff --git ethereum/go-ethereum/tests/init_test.go taikoxyz/taiko-geth/tests/init_test.go
index 7d8743efcc71b1456dfe23d2f2f469c974ca3768..3ab15e76583212b37e598578c2da5681eb94fcb6 100644
--- ethereum/go-ethereum/tests/init_test.go
+++ taikoxyz/taiko-geth/tests/init_test.go
@@ -41,6 +41,7 @@ legacyStateTestDir = filepath.Join(baseDir, "LegacyTests", "Constantinople", "GeneralStateTests")
transactionTestDir = filepath.Join(baseDir, "TransactionTests")
rlpTestDir = filepath.Join(baseDir, "RLPTests")
difficultyTestDir = filepath.Join(baseDir, "BasicTests")
+ executionSpecDir = filepath.Join(".", "spec-tests", "fixtures")
benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
)
diff --git ethereum/go-ethereum/tests/state_test.go taikoxyz/taiko-geth/tests/state_test.go
index 782f1b0b4c294e4ae735067d35fe8d0d9850a45a..094dafcafd7a372b7f8331d14fe1b387bc5a7357 100644
--- ethereum/go-ethereum/tests/state_test.go
+++ taikoxyz/taiko-geth/tests/state_test.go
@@ -30,6 +30,8 @@ "time"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
@@ -78,21 +80,52 @@ for _, subtest := range test.Subtests() {
subtest := subtest
key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
- t.Run(key+"/trie", func(t *testing.T) {
+ t.Run(key+"/hash/trie", func(t *testing.T) {
withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- _, _, err := test.Run(subtest, vmconfig, false)
- return st.checkFailure(t, err)
+ var result error
+ test.Run(subtest, vmconfig, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
+ result = st.checkFailure(t, err)
+ })
+ return result
})
})
- t.Run(key+"/snap", func(t *testing.T) {
+ t.Run(key+"/hash/snap", func(t *testing.T) {
withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- snaps, statedb, err := test.Run(subtest, vmconfig, true)
- if snaps != nil && statedb != nil {
- if _, err := snaps.Journal(statedb.IntermediateRoot(false)); err != nil {
- return err
+ var result error
+ test.Run(subtest, vmconfig, true, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
+ if snaps != nil && state != nil {
+ if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil {
+ result = err
+ return
+ }
}
- }
- return st.checkFailure(t, err)
+ result = st.checkFailure(t, err)
+ })
+ return result
+ })
+ })
+ t.Run(key+"/path/trie", func(t *testing.T) {
+ withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+ var result error
+ test.Run(subtest, vmconfig, false, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
+ result = st.checkFailure(t, err)
+ })
+ return result
+ })
+ })
+ t.Run(key+"/path/snap", func(t *testing.T) {
+ withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+ var result error
+ test.Run(subtest, vmconfig, true, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
+ if snaps != nil && state != nil {
+ if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil {
+ result = err
+ return
+ }
+ }
+ result = st.checkFailure(t, err)
+ })
+ return result
})
})
}
@@ -190,7 +223,8 @@ var rules = config.Rules(new(big.Int), false, 0)
vmconfig.ExtraEips = eips
block := t.genesis(config).ToBlock()
- _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false)
+ triedb, _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false, rawdb.HashScheme)
+ defer triedb.Close()
var baseFee *big.Int
if rules.IsLondon {
diff --git ethereum/go-ethereum/tests/state_test_util.go taikoxyz/taiko-geth/tests/state_test_util.go
index 42f0c662ee42eb4f5fc28b54a9cfcd5d5f0851b2..8c255c1b5bd23c1c3fa374fb997d57f596c3a56d 100644
--- ethereum/go-ethereum/tests/state_test_util.go
+++ taikoxyz/taiko-geth/tests/state_test_util.go
@@ -39,6 +39,8 @@ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"golang.org/x/crypto/sha3"
)
@@ -113,6 +115,7 @@ AccessLists []*types.AccessList `json:"accessLists,omitempty"`
GasLimit []uint64 `json:"gasLimit"`
Value []string `json:"value"`
PrivateKey []byte `json:"secretKey"`
+ Sender *common.Address `json:"sender"`
BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
BlobGasFeeCap *big.Int `json:"maxFeePerBlobGas,omitempty"`
}
@@ -187,43 +190,50 @@ return nil
}
// Run executes a specific subtest and verifies the post-state and logs
-func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, error) {
- snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter)
- if checkedErr := t.checkError(subtest, err); checkedErr != nil {
- return snaps, statedb, checkedErr
+func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string, postCheck func(err error, snaps *snapshot.Tree, state *state.StateDB)) (result error) {
+ triedb, snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter, scheme)
+
+ // Invoke the callback at the end of function for further analysis.
+ defer func() {
+ postCheck(result, snaps, statedb)
+
+ if triedb != nil {
+ triedb.Close()
+ }
+ }()
+ checkedErr := t.checkError(subtest, err)
+ if checkedErr != nil {
+ return checkedErr
}
// The error has been checked; if it was unexpected, it's already returned.
if err != nil {
// Here, an error exists but it was expected.
// We do not check the post state or logs.
- return snaps, statedb, nil
+ return nil
}
post := t.json.Post[subtest.Fork][subtest.Index]
// N.B: We need to do this in a two-step process, because the first Commit takes care
// of self-destructs, and we need to touch the coinbase _after_ it has potentially self-destructed.
if root != common.Hash(post.Root) {
- return snaps, statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
+ return fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
}
if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) {
- return snaps, statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
- }
- // Re-init the post-state instance for further operation
- statedb, err = state.New(root, statedb.Database(), snaps)
- if err != nil {
- return nil, nil, err
+ return fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
}
- return snaps, statedb, nil
+ statedb, _ = state.New(root, statedb.Database(), snaps)
+ return nil
}
// RunNoVerify runs a specific subtest and returns the statedb and post-state root
-func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, common.Hash, error) {
+func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB, common.Hash, error) {
config, eips, err := GetChainConfig(subtest.Fork)
if err != nil {
- return nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
+ return nil, nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
vmconfig.ExtraEips = eips
+
block := t.genesis(config).ToBlock()
- snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
+ triedb, snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter, scheme)
var baseFee *big.Int
if config.IsLondon(new(big.Int)) {
@@ -237,7 +247,8 @@ }
post := t.json.Post[subtest.Fork][subtest.Index]
msg, err := t.json.Tx.toMessage(post, baseFee)
if err != nil {
- return nil, nil, common.Hash{}, err
+ triedb.Close()
+ return nil, nil, nil, common.Hash{}, err
}
// Try to recover tx with current signer
@@ -245,11 +256,13 @@ if len(post.TxBytes) != 0 {
var ttx types.Transaction
err := ttx.UnmarshalBinary(post.TxBytes)
if err != nil {
- return nil, nil, common.Hash{}, err
+ triedb.Close()
+ return nil, nil, nil, common.Hash{}, err
}
if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil {
- return nil, nil, common.Hash{}, err
+ triedb.Close()
+ return nil, nil, nil, common.Hash{}, err
}
}
@@ -268,6 +281,7 @@ context.Random = &rnd
context.Difficulty = big.NewInt(0)
}
evm := vm.NewEVM(context, txContext, statedb, config, vmconfig)
+
// Execute the message.
snapshot := statedb.Snapshot()
gaspool := new(core.GasPool)
@@ -282,17 +296,25 @@ // - the coinbase self-destructed, or
// - there are only 'bad' transactions, which aren't executed. In those cases,
// the coinbase gets no txfee, so isn't created, and thus needs to be touched
statedb.AddBalance(block.Coinbase(), new(big.Int))
- // Commit block
+
+ // Commit state mutations into database.
root, _ := statedb.Commit(block.NumberU64(), config.IsEIP158(block.Number()))
- return snaps, statedb, root, err
+ return triedb, snaps, statedb, root, err
}
func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
}
-func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {
- sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB) {
+ tconf := &trie.Config{Preimages: true}
+ if scheme == rawdb.HashScheme {
+ tconf.HashDB = hashdb.Defaults
+ } else {
+ tconf.PathDB = pathdb.Defaults
+ }
+ triedb := trie.NewDatabase(db, tconf)
+ sdb := state.NewDatabaseWithNodeDB(db, triedb)
statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
@@ -313,10 +335,10 @@ Recovery: false,
NoBuild: false,
AsyncBuild: false,
}
- snaps, _ = snapshot.New(snapconfig, db, sdb.TrieDB(), root)
+ snaps, _ = snapshot.New(snapconfig, db, triedb, root)
}
statedb, _ = state.New(root, sdb, snaps)
- return snaps, statedb
+ return triedb, snaps, statedb
}
func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
@@ -338,9 +360,12 @@ return genesis
}
func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (*core.Message, error) {
- // Derive sender from private key if present.
var from common.Address
- if len(tx.PrivateKey) > 0 {
+ // If 'sender' field is present, use that
+ if tx.Sender != nil {
+ from = *tx.Sender
+ } else if len(tx.PrivateKey) > 0 {
+ // Derive sender from private key if needed.
key, err := crypto.ToECDSA(tx.PrivateKey)
if err != nil {
return nil, fmt.Errorf("invalid private key: %v", err)
diff --git ethereum/go-ethereum/trie/database.go taikoxyz/taiko-geth/trie/database.go
index 49a884fd7f39124c666f393716cf0e802fe72090..1e59f0908f38712d7360534644b0005596948b31 100644
--- ethereum/go-ethereum/trie/database.go
+++ taikoxyz/taiko-geth/trie/database.go
@@ -21,6 +21,7 @@ "errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
@@ -29,12 +30,16 @@ )
// Config defines all necessary options for database.
type Config struct {
- Cache int // Memory allowance (MB) to use for caching trie nodes in memory
- Preimages bool // Flag whether the preimage of trie key is recorded
- PathDB *pathdb.Config // Configs for experimental path-based scheme, not used yet.
+ Preimages bool // Flag whether the preimage of node key is recorded
+ HashDB *hashdb.Config // Configs for hash-based scheme
+ PathDB *pathdb.Config // Configs for experimental path-based scheme
+}
- // Testing hooks
- OnCommit func(states *triestate.Set) // Hook invoked when commit is performed
+// HashDefaults represents a config for using hash-based scheme with
+// default settings.
+var HashDefaults = &Config{
+ Preimages: false,
+ HashDB: hashdb.Defaults,
}
// backend defines the methods needed to access/update trie nodes in different
@@ -47,9 +52,12 @@ // Initialized returns an indicator if the state data is already initialized
// according to the state scheme.
Initialized(genesisRoot common.Hash) bool
- // Size returns the current storage size of the memory cache in front of the
- // persistent database layer.
- Size() common.StorageSize
+ // Size returns the current storage size of the diff layers on top of the
+ // disk layer and the storage size of the nodes cached in the disk layer.
+ //
+ // For hash scheme, there is no differentiation between diff layer nodes
+ // and dirty disk layer nodes, so both are merged into the second return.
+ Size() (common.StorageSize, common.StorageSize)
// Update performs a state transition by committing dirty nodes contained
// in the given set in order to update state from the specified parent to
@@ -77,36 +85,30 @@ preimages *preimageStore // The store for caching preimages
backend backend // The backend for managing trie nodes
}
-// prepare initializes the database with provided configs, but the
-// database backend is still left as nil.
-func prepare(diskdb ethdb.Database, config *Config) *Database {
+// NewDatabase initializes the trie database with default settings, note
+// the legacy hash-based scheme is used by default.
+func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
+ // Sanitize the config and use the default one if it's not specified.
+ if config == nil {
+ config = HashDefaults
+ }
var preimages *preimageStore
- if config != nil && config.Preimages {
+ if config.Preimages {
preimages = newPreimageStore(diskdb)
}
- return &Database{
+ db := &Database{
config: config,
diskdb: diskdb,
preimages: preimages,
}
-}
-
-// NewDatabase initializes the trie database with default settings, namely
-// the legacy hash-based scheme is used by default.
-func NewDatabase(diskdb ethdb.Database) *Database {
- return NewDatabaseWithConfig(diskdb, nil)
-}
-
-// NewDatabaseWithConfig initializes the trie database with provided configs.
-// The path-based scheme is not activated yet, always initialized with legacy
-// hash-based scheme by default.
-func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
- var cleans int
- if config != nil && config.Cache != 0 {
- cleans = config.Cache * 1024 * 1024
+ if config.HashDB != nil && config.PathDB != nil {
+ log.Crit("Both 'hash' and 'path' mode are configured")
+ }
+ if config.PathDB != nil {
+ db.backend = pathdb.New(diskdb, config.PathDB)
+ } else {
+ db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{})
}
- db := prepare(diskdb, config)
- db.backend = hashdb.New(diskdb, cleans, mptResolver{})
return db
}
@@ -130,9 +132,6 @@ //
// The passed in maps(nodes, states) will be retained to avoid copying everything.
// Therefore, these maps must not be changed afterwards.
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
- if db.config != nil && db.config.OnCommit != nil {
- db.config.OnCommit(states)
- }
if db.preimages != nil {
db.preimages.commit(false)
}
@@ -149,18 +148,19 @@ }
return db.backend.Commit(root, report)
}
-// Size returns the storage size of dirty trie nodes in front of the persistent
-// database and the size of cached preimages.
-func (db *Database) Size() (common.StorageSize, common.StorageSize) {
+// Size returns the storage size of diff layer nodes above the persistent disk
+// layer, the dirty nodes buffered within the disk layer, and the size of cached
+// preimages.
+func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) {
var (
- storages common.StorageSize
- preimages common.StorageSize
+ diffs, nodes common.StorageSize
+ preimages common.StorageSize
)
- storages = db.backend.Size()
+ diffs, nodes = db.backend.Size()
if db.preimages != nil {
preimages = db.preimages.size()
}
- return storages, preimages
+ return diffs, nodes, preimages
}
// Initialized returns an indicator if the state data is already initialized
@@ -187,6 +187,15 @@ func (db *Database) WritePreimages() {
if db.preimages != nil {
db.preimages.commit(true)
}
+}
+
+// Preimage retrieves a cached trie node pre-image from memory. If it cannot be
+// found cached, the method queries the persistent database for the content.
+func (db *Database) Preimage(hash common.Hash) []byte {
+ if db.preimages == nil {
+ return nil
+ }
+ return db.preimages.preimage(hash)
}
// Cap iteratively flushes old but still referenced trie nodes until the total
@@ -240,3 +249,72 @@ return nil, errors.New("not supported")
}
return hdb.Node(hash)
}
+
+// Recover rolls back the database to a specified historical point. The state is
+// supported as the rollback destination only if it's canonical state and the
+// corresponding trie histories exist. It's only supported by path-based
+// database and will return an error for others.
+func (db *Database) Recover(target common.Hash) error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.Recover(target, &trieLoader{db: db})
+}
+
+// Recoverable returns an indicator of whether the specified state can be
+// recovered. It's only supported by path-based database and will return an
+// error for others.
+func (db *Database) Recoverable(root common.Hash) (bool, error) {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return false, errors.New("not supported")
+ }
+ return pdb.Recoverable(root), nil
+}
+
+// Disable deactivates the database and invalidates all available state layers
+// as stale to prevent access to the persistent state, which is in the syncing
+// stage.
+//
+// It's only supported by path-based database and will return an error for others.
+func (db *Database) Disable() error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.Disable()
+}
+
+// Enable activates database and resets the state tree with the provided persistent
+// state root once the state sync is finished.
+func (db *Database) Enable(root common.Hash) error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.Enable(root)
+}
+
+// Journal commits an entire diff hierarchy to disk into a single journal entry.
+// This is meant to be used during shutdown to persist the snapshot without
+// flattening everything down (bad for reorgs). It's only supported by path-based
+// database and will return an error for others.
+func (db *Database) Journal(root common.Hash) error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.Journal(root)
+}
+
+// SetBufferSize sets the node buffer size to the provided value(in bytes).
+// It's only supported by path-based database and will return an error for
+// others.
+func (db *Database) SetBufferSize(size int) error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.SetBufferSize(size)
+}
diff --git ethereum/go-ethereum/trie/database_test.go taikoxyz/taiko-geth/trie/database_test.go
index ed43a81e5976e8196bf8a367cbbe75986bffc942..d508c6553319ab6a9ff197e36a992aa103fe69af 100644
--- ethereum/go-ethereum/trie/database_test.go
+++ taikoxyz/taiko-geth/trie/database_test.go
@@ -25,11 +25,16 @@ )
// newTestDatabase initializes the trie database with specified scheme.
func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
- db := prepare(diskdb, nil)
+ config := &Config{Preimages: false}
if scheme == rawdb.HashScheme {
- db.backend = hashdb.New(diskdb, 0, mptResolver{})
+ config.HashDB = &hashdb.Config{
+ CleanCacheSize: 0,
+ } // disable clean cache
} else {
- db.backend = pathdb.New(diskdb, &pathdb.Config{}) // disable clean/dirty cache
+ config.PathDB = &pathdb.Config{
+ CleanCacheSize: 0,
+ DirtyCacheSize: 0,
+ } // disable clean/dirty cache
}
- return db
+ return NewDatabase(diskdb, config)
}
diff --git ethereum/go-ethereum/trie/encoding.go taikoxyz/taiko-geth/trie/encoding.go
index 8ee0022ef3a09d5902b9610ac6ceb4e7191782d9..3284d3f8f0218e299efb82730d3375767f1c7bd3 100644
--- ethereum/go-ethereum/trie/encoding.go
+++ taikoxyz/taiko-geth/trie/encoding.go
@@ -51,9 +51,8 @@ decodeNibbles(hex, buf[1:])
return buf
}
-// hexToCompactInPlace places the compact key in input buffer, returning the length
-// needed for the representation
-func hexToCompactInPlace(hex []byte) int {
+// hexToCompactInPlace places the compact key in input buffer, returning the compacted key.
+func hexToCompactInPlace(hex []byte) []byte {
var (
hexLen = len(hex) // length of the hex input
firstByte = byte(0)
@@ -77,7 +76,7 @@ for ; ni < hexLen; bi, ni = bi+1, ni+2 {
hex[bi] = hex[ni]<<4 | hex[ni+1]
}
hex[0] = firstByte
- return binLen
+ return hex[:binLen]
}
func compactToHex(compact []byte) []byte {
diff --git ethereum/go-ethereum/trie/encoding_test.go taikoxyz/taiko-geth/trie/encoding_test.go
index d16d25c359c7741f63f18fc0cdaa031765169441..ac50b5d025e56d8fcf1cc3a9e2f8081657b51c43 100644
--- ethereum/go-ethereum/trie/encoding_test.go
+++ taikoxyz/taiko-geth/trie/encoding_test.go
@@ -86,8 +86,7 @@ "10",
} {
hexBytes, _ := hex.DecodeString(key)
exp := hexToCompact(hexBytes)
- sz := hexToCompactInPlace(hexBytes)
- got := hexBytes[:sz]
+ got := hexToCompactInPlace(hexBytes)
if !bytes.Equal(exp, got) {
t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, key, got, exp)
}
@@ -102,8 +101,7 @@ crand.Read(key)
hexBytes := keybytesToHex(key)
hexOrig := []byte(string(hexBytes))
exp := hexToCompact(hexBytes)
- sz := hexToCompactInPlace(hexBytes)
- got := hexBytes[:sz]
+ got := hexToCompactInPlace(hexBytes)
if !bytes.Equal(exp, got) {
t.Fatalf("encoding err \ncpt %x\nhex %x\ngot %x\nexp %x\n",
@@ -116,6 +114,13 @@ func BenchmarkHexToCompact(b *testing.B) {
testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
for i := 0; i < b.N; i++ {
hexToCompact(testBytes)
+ }
+}
+
+func BenchmarkHexToCompactInPlace(b *testing.B) {
+ testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
+ for i := 0; i < b.N; i++ {
+ hexToCompactInPlace(testBytes)
}
}
diff --git ethereum/go-ethereum/trie/iterator_test.go taikoxyz/taiko-geth/trie/iterator_test.go
index e711ffb8158356eb88017572e678d1f80b03fd27..57d1f06a160a3ec851f8ec0db9ab5cd71038d406 100644
--- ethereum/go-ethereum/trie/iterator_test.go
+++ taikoxyz/taiko-geth/trie/iterator_test.go
@@ -18,7 +18,6 @@ package trie
import (
"bytes"
- "encoding/binary"
"fmt"
"math/rand"
"testing"
@@ -27,13 +26,11 @@ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/trie/trienode"
)
func TestEmptyIterator(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
iter := trie.MustNodeIterator(nil)
seen := make(map[string]struct{})
@@ -46,7 +43,7 @@ }
}
func TestIterator(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
+ db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -89,7 +86,7 @@ return bytes.Compare(k.k, other.k)
}
func TestIteratorLargeData(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
for i := byte(0); i < 255; i++ {
@@ -208,7 +205,7 @@ {"jars", "d"},
}
func TestIteratorSeek(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for _, val := range testdata1 {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
@@ -249,7 +246,7 @@ return nil
}
func TestDifferenceIterator(t *testing.T) {
- dba := NewDatabase(rawdb.NewMemoryDatabase())
+ dba := NewDatabase(rawdb.NewMemoryDatabase(), nil)
triea := NewEmpty(dba)
for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v))
@@ -258,7 +255,7 @@ rootA, nodesA, _ := triea.Commit(false)
dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
- dbb := NewDatabase(rawdb.NewMemoryDatabase())
+ dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trieb := NewEmpty(dbb)
for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v))
@@ -291,7 +288,7 @@ }
}
func TestUnionIterator(t *testing.T) {
- dba := NewDatabase(rawdb.NewMemoryDatabase())
+ dba := NewDatabase(rawdb.NewMemoryDatabase(), nil)
triea := NewEmpty(dba)
for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v))
@@ -300,7 +297,7 @@ rootA, nodesA, _ := triea.Commit(false)
dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
- dbb := NewDatabase(rawdb.NewMemoryDatabase())
+ dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trieb := NewEmpty(dbb)
for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v))
@@ -344,7 +341,7 @@ }
}
func TestIteratorNoDups(t *testing.T) {
- tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for _, val := range testdata1 {
tr.MustUpdate([]byte(val.k), []byte(val.v))
}
@@ -537,96 +534,6 @@ testIteratorNodeBlob(t, rawdb.HashScheme)
testIteratorNodeBlob(t, rawdb.PathScheme)
}
-type loggingDb struct {
- getCount uint64
- backend ethdb.KeyValueStore
-}
-
-func (l *loggingDb) Has(key []byte) (bool, error) {
- return l.backend.Has(key)
-}
-
-func (l *loggingDb) Get(key []byte) ([]byte, error) {
- l.getCount++
- return l.backend.Get(key)
-}
-
-func (l *loggingDb) Put(key []byte, value []byte) error {
- return l.backend.Put(key, value)
-}
-
-func (l *loggingDb) Delete(key []byte) error {
- return l.backend.Delete(key)
-}
-
-func (l *loggingDb) NewBatch() ethdb.Batch {
- return l.backend.NewBatch()
-}
-
-func (l *loggingDb) NewBatchWithSize(size int) ethdb.Batch {
- return l.backend.NewBatchWithSize(size)
-}
-
-func (l *loggingDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
- return l.backend.NewIterator(prefix, start)
-}
-
-func (l *loggingDb) NewSnapshot() (ethdb.Snapshot, error) {
- return l.backend.NewSnapshot()
-}
-
-func (l *loggingDb) Stat(property string) (string, error) {
- return l.backend.Stat(property)
-}
-
-func (l *loggingDb) Compact(start []byte, limit []byte) error {
- return l.backend.Compact(start, limit)
-}
-
-func (l *loggingDb) Close() error {
- return l.backend.Close()
-}
-
-// makeLargeTestTrie create a sample test trie
-func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) {
- // Create an empty trie
- logDb := &loggingDb{0, memorydb.New()}
- triedb := NewDatabase(rawdb.NewDatabase(logDb))
- trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)
-
- // Fill it with some arbitrary data
- for i := 0; i < 10000; i++ {
- key := make([]byte, 32)
- val := make([]byte, 32)
- binary.BigEndian.PutUint64(key, uint64(i))
- binary.BigEndian.PutUint64(val, uint64(i))
- key = crypto.Keccak256(key)
- val = crypto.Keccak256(val)
- trie.MustUpdate(key, val)
- }
- root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
- triedb.Commit(root, false)
-
- // Return the generated trie
- trie, _ = NewStateTrie(TrieID(root), triedb)
- return triedb, trie, logDb
-}
-
-// Tests that the node iterator indeed walks over the entire database contents.
-func TestNodeIteratorLargeTrie(t *testing.T) {
- // Create some arbitrary test trie to iterate
- db, trie, logDb := makeLargeTestTrie()
- db.Cap(0) // flush everything
- // Do a seek operation
- trie.NodeIterator(common.FromHex("0x77667766776677766778855885885885"))
- // master: 24 get operations
- // this pr: 6 get operations
- if have, want := logDb.getCount, uint64(6); have != want {
- t.Fatalf("Too many lookups during seek, have %d want %d", have, want)
- }
-}
-
func testIteratorNodeBlob(t *testing.T, scheme string) {
var (
db = rawdb.NewMemoryDatabase()
@@ -700,7 +607,7 @@ return false, nil, common.Hash{}
}
hash = common.BytesToHash(key)
} else {
- ok, remain := rawdb.IsAccountTrieNode(key)
+ ok, remain := rawdb.ResolveAccountTrieNodeKey(key)
if !ok {
return false, nil, common.Hash{}
}
diff --git ethereum/go-ethereum/trie/proof.go taikoxyz/taiko-geth/trie/proof.go
index a463c80b4889bf3354b3c953c68ba1dc3f8620bd..a526a534025300bd1c78a69fffd1103d5574df75 100644
--- ethereum/go-ethereum/trie/proof.go
+++ taikoxyz/taiko-geth/trie/proof.go
@@ -481,7 +481,7 @@ //
// Note: This method does not verify that the proof is of minimal form. If the input
// proofs are 'bloated' with neighbour leaves or random data, aside from the 'useful'
// data, then the proof will still be accepted.
-func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) {
+func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) {
if len(keys) != len(values) {
return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
}
@@ -520,6 +520,7 @@ return false, errors.New("more entries available")
}
return false, nil
}
+ var lastKey = keys[len(keys)-1]
// Special case, there is only one element and two edge keys are same.
// In this case, we can't construct two edge paths. So handle it here.
if len(keys) == 1 && bytes.Equal(firstKey, lastKey) {
diff --git ethereum/go-ethereum/trie/proof_test.go taikoxyz/taiko-geth/trie/proof_test.go
index e8ea116c8803bfdc6b9d8c829f0e6ecb09b0e2d0..59ae201cea162f72e43060aff002c1dfddd08864 100644
--- ethereum/go-ethereum/trie/proof_test.go
+++ taikoxyz/taiko-geth/trie/proof_test.go
@@ -94,7 +94,7 @@ }
}
func TestOneElementProof(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
updateString(trie, "k", "v")
for i, prover := range makeProvers(trie) {
proof := prover([]byte("k"))
@@ -145,7 +145,7 @@
// Tests that missing keys can also be proven. The test explicitly uses a single
// entry trie and checks for missing keys both before and after the single entry.
func TestMissingKeyProof(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
updateString(trie, "k", "v")
for i, key := range []string{"a", "j", "l", "z"} {
@@ -191,7 +191,7 @@ for i := start; i < end; i++ {
keys = append(keys, entries[i].k)
vals = append(vals, entries[i].v)
}
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof)
+ _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
if err != nil {
t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
}
@@ -221,19 +221,10 @@ // Short circuit if the decreased key is underflow
if bytes.Compare(first, entries[start].k) > 0 {
continue
}
- // Short circuit if the increased key is same with the next key
- last := increaseKey(common.CopyBytes(entries[end-1].k))
- if end != len(entries) && bytes.Equal(last, entries[end].k) {
- continue
- }
- // Short circuit if the increased key is overflow
- if bytes.Compare(last, entries[end-1].k) < 0 {
- continue
- }
if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, proof); err != nil {
+ if err := trie.Prove(entries[end-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
var keys [][]byte
@@ -242,36 +233,15 @@ for i := start; i < end; i++ {
keys = append(keys, entries[i].k)
vals = append(vals, entries[i].v)
}
- _, err := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof)
+ _, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof)
if err != nil {
t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
}
}
- // Special case, two edge proofs for two edge key.
- proof := memorydb.New()
- first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
- last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
- if err := trie.Prove(first, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- var k [][]byte
- var v [][]byte
- for i := 0; i < len(entries); i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), first, last, k, v, proof)
- if err != nil {
- t.Fatal("Failed to verify whole rang with non-existent edges")
- }
}
// TestRangeProofWithInvalidNonExistentProof tests such scenarios:
// - There exists a gap between the first element and the left edge proof
-// - There exists a gap between the last element and the right edge proof
func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
trie, vals := randomTrie(4096)
var entries []*kv
@@ -298,29 +268,7 @@ for i := start; i < end; i++ {
k = append(k, entries[i].k)
v = append(v, entries[i].v)
}
- _, err := VerifyRangeProof(trie.Hash(), first, k[len(k)-1], k, v, proof)
- if err == nil {
- t.Fatalf("Expected to detect the error, got nil")
- }
-
- // Case 2
- start, end = 100, 200
- last := increaseKey(common.CopyBytes(entries[end-1].k))
- proof = memorydb.New()
- if err := trie.Prove(entries[start].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- if err := trie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- end = 195 // Capped slice
- k = make([][]byte, 0)
- v = make([][]byte, 0)
- for i := start; i < end; i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- _, err = VerifyRangeProof(trie.Hash(), k[0], last, k, v, proof)
+ _, err := VerifyRangeProof(trie.Hash(), first, k, v, proof)
if err == nil {
t.Fatalf("Expected to detect the error, got nil")
}
@@ -344,7 +292,7 @@ proof := memorydb.New()
if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- _, err := VerifyRangeProof(trie.Hash(), entries[start].k, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
+ _, err := VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -359,7 +307,7 @@ }
if err := trie.Prove(entries[start].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
- _, err = VerifyRangeProof(trie.Hash(), first, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
+ _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -374,7 +322,7 @@ }
if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
- _, err = VerifyRangeProof(trie.Hash(), entries[start].k, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
+ _, err = VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -389,13 +337,13 @@ }
if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
- _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
+ _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
// Test the mini trie with only a single element.
- tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
entry := &kv{randBytes(32), randBytes(20), false}
tinyTrie.MustUpdate(entry.k, entry.v)
@@ -408,7 +356,7 @@ }
if err := tinyTrie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
- _, err = VerifyRangeProof(tinyTrie.Hash(), first, last, [][]byte{entry.k}, [][]byte{entry.v}, proof)
+ _, err = VerifyRangeProof(tinyTrie.Hash(), first, [][]byte{entry.k}, [][]byte{entry.v}, proof)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -430,7 +378,7 @@ for i := 0; i < len(entries); i++ {
k = append(k, entries[i].k)
v = append(v, entries[i].v)
}
- _, err := VerifyRangeProof(trie.Hash(), nil, nil, k, v, nil)
+ _, err := VerifyRangeProof(trie.Hash(), nil, k, v, nil)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -443,7 +391,7 @@ }
if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
- _, err = VerifyRangeProof(trie.Hash(), k[0], k[len(k)-1], k, v, proof)
+ _, err = VerifyRangeProof(trie.Hash(), k[0], k, v, proof)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -451,14 +399,13 @@
// Even with non-existent edge proofs, it should still work.
proof = memorydb.New()
first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
- last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, proof); err != nil {
+ if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
- _, err = VerifyRangeProof(trie.Hash(), first, last, k, v, proof)
+ _, err = VerifyRangeProof(trie.Hash(), first, k, v, proof)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -467,7 +414,7 @@
// TestSingleSideRangeProof tests the range starts from zero.
func TestSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -491,43 +438,7 @@ for i := 0; i <= pos; i++ {
k = append(k, entries[i].k)
v = append(v, entries[i].v)
}
- _, err := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k[len(k)-1], k, v, proof)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- }
- }
-}
-
-// TestReverseSingleSideRangeProof tests the range ends with 0xffff...fff.
-func TestReverseSingleSideRangeProof(t *testing.T) {
- for i := 0; i < 64; i++ {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- var entries []*kv
- for i := 0; i < 4096; i++ {
- value := &kv{randBytes(32), randBytes(20), false}
- trie.MustUpdate(value.k, value.v)
- entries = append(entries, value)
- }
- slices.SortFunc(entries, (*kv).cmp)
-
- var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1}
- for _, pos := range cases {
- proof := memorydb.New()
- if err := trie.Prove(entries[pos].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- if err := trie.Prove(last.Bytes(), proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
- }
- k := make([][]byte, 0)
- v := make([][]byte, 0)
- for i := pos; i < len(entries); i++ {
- k = append(k, entries[i].k)
- v = append(v, entries[i].v)
- }
- _, err := VerifyRangeProof(trie.Hash(), k[0], last.Bytes(), k, v, proof)
+ _, err := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k, v, proof)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -561,7 +472,7 @@ for i := start; i < end; i++ {
keys = append(keys, entries[i].k)
vals = append(vals, entries[i].v)
}
- var first, last = keys[0], keys[len(keys)-1]
+ var first = keys[0]
testcase := mrand.Intn(6)
var index int
switch testcase {
@@ -576,7 +487,7 @@ vals[index] = randBytes(20) // In theory it can't be same
case 2:
// Gapped entry slice
index = mrand.Intn(end - start)
- if (index == 0 && start < 100) || (index == end-start-1 && end <= 100) {
+ if (index == 0 && start < 100) || (index == end-start-1) {
continue
}
keys = append(keys[:index], keys[index+1:]...)
@@ -599,7 +510,7 @@ // Set random value to nil, deletion
index = mrand.Intn(end - start)
vals[index] = nil
}
- _, err := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof)
+ _, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof)
if err == nil {
t.Fatalf("%d Case %d index %d range: (%d->%d) expect error, got nil", i, testcase, index, start, end-1)
}
@@ -609,7 +520,7 @@
// TestGappedRangeProof focuses on the small trie with embedded nodes.
// If the gapped node is embedded in the trie, it should be detected too.
func TestGappedRangeProof(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
var entries []*kv // Sorted entries
for i := byte(0); i < 10; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -633,7 +544,7 @@ }
keys = append(keys, entries[i].k)
vals = append(vals, entries[i].v)
}
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof)
+ _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
if err == nil {
t.Fatal("expect error, got nil")
}
@@ -649,24 +560,22 @@ }
slices.SortFunc(entries, (*kv).cmp)
pos := 1000
- first := decreaseKey(common.CopyBytes(entries[pos].k))
- first = decreaseKey(first)
- last := decreaseKey(common.CopyBytes(entries[pos].k))
+ first := common.CopyBytes(entries[0].k)
proof := memorydb.New()
if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- if err := trie.Prove(last, proof); err != nil {
- t.Fatalf("Failed to prove the last node %v", err)
+ if err := trie.Prove(entries[2000].k, proof); err != nil {
+ t.Fatalf("Failed to prove the first node %v", err)
}
- _, err := VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
+ _, err := VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
if err == nil {
t.Fatalf("Expected error, got nil")
}
first = increaseKey(common.CopyBytes(entries[pos].k))
- last = increaseKey(common.CopyBytes(entries[pos].k))
+ last := increaseKey(common.CopyBytes(entries[pos].k))
last = increaseKey(last)
proof = memorydb.New()
@@ -676,14 +585,14 @@ }
if err := trie.Prove(last, proof); err != nil {
t.Fatalf("Failed to prove the last node %v", err)
}
- _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
+ _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
if err == nil {
t.Fatalf("Expected error, got nil")
}
}
func TestHasRightElement(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -703,15 +612,12 @@ {0, 10, true},
{50, 100, true},
{50, len(entries), false}, // No more element expected
{len(entries) - 1, len(entries), false}, // Single last element with two existent proofs(point to same key)
- {len(entries) - 1, -1, false}, // Single last element with non-existent right proof
{0, len(entries), false}, // The whole set with existent left proof
{-1, len(entries), false}, // The whole set with non-existent left proof
- {-1, -1, false}, // The whole set with non-existent left/right proof
}
for _, c := range cases {
var (
firstKey []byte
- lastKey []byte
start = c.start
end = c.end
proof = memorydb.New()
@@ -727,16 +633,8 @@ if err := trie.Prove(entries[c.start].k, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
}
- if c.end == -1 {
- lastKey, end = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), len(entries)
- if err := trie.Prove(lastKey, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
- } else {
- lastKey = entries[c.end-1].k
- if err := trie.Prove(entries[c.end-1].k, proof); err != nil {
- t.Fatalf("Failed to prove the first node %v", err)
- }
+ if err := trie.Prove(entries[c.end-1].k, proof); err != nil {
+ t.Fatalf("Failed to prove the first node %v", err)
}
k := make([][]byte, 0)
v := make([][]byte, 0)
@@ -744,7 +642,7 @@ for i := start; i < end; i++ {
k = append(k, entries[i].k)
v = append(v, entries[i].v)
}
- hasMore, err := VerifyRangeProof(trie.Hash(), firstKey, lastKey, k, v, proof)
+ hasMore, err := VerifyRangeProof(trie.Hash(), firstKey, k, v, proof)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
@@ -777,7 +675,7 @@ first := increaseKey(common.CopyBytes(entries[c.pos].k))
if err := trie.Prove(first, proof); err != nil {
t.Fatalf("Failed to prove the first node %v", err)
}
- _, err := VerifyRangeProof(trie.Hash(), first, nil, nil, nil, proof)
+ _, err := VerifyRangeProof(trie.Hash(), first, nil, nil, proof)
if c.err && err == nil {
t.Fatalf("Expected error, got nil")
}
@@ -817,7 +715,7 @@ want := memorydb.New()
trie.Prove(keys[0], want)
trie.Prove(keys[len(keys)-1], want)
- if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof); err != nil {
+ if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof); err != nil {
t.Fatalf("expected bloated proof to succeed, got %v", err)
}
}
@@ -860,7 +758,7 @@ for i := start; i < end; i++ {
keys = append(keys, entries[i].k)
vals = append(vals, entries[i].v)
}
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof)
+ _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
if err == nil {
t.Fatalf("Expected failure on noop entry")
}
@@ -895,7 +793,7 @@ for i := 0; i < len(entries); i++ {
keys = append(keys, entries[i].k)
vals = append(vals, entries[i].v)
}
- _, err := VerifyRangeProof(trie.Hash(), nil, nil, keys, vals, nil)
+ _, err := VerifyRangeProof(trie.Hash(), nil, keys, vals, nil)
if err == nil {
t.Fatalf("Expected failure on noop entry")
}
@@ -1001,7 +899,7 @@ }
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, values, proof)
+ _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, proof)
if err != nil {
b.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
}
@@ -1028,7 +926,7 @@ values = append(values, entry.v)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, values, nil)
+ _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, nil)
if err != nil {
b.Fatalf("Expected no error, got %v", err)
}
@@ -1036,7 +934,7 @@ }
}
func randomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
for i := byte(0); i < 100; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -1055,7 +953,7 @@ return trie, vals
}
func nonRandomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
max := uint64(0xffffffffffffffff)
for i := uint64(0); i < uint64(n); i++ {
@@ -1080,22 +978,21 @@ vals := [][]byte{
common.Hex2Bytes("02"),
common.Hex2Bytes("03"),
}
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for i, key := range keys {
trie.MustUpdate(key, vals[i])
}
root := trie.Hash()
proof := memorydb.New()
start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
- end := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
if err := trie.Prove(start, proof); err != nil {
t.Fatalf("failed to prove start: %v", err)
}
- if err := trie.Prove(end, proof); err != nil {
+ if err := trie.Prove(keys[len(keys)-1], proof); err != nil {
t.Fatalf("failed to prove end: %v", err)
}
- more, err := VerifyRangeProof(root, start, end, keys, vals, proof)
+ more, err := VerifyRangeProof(root, start, keys, vals, proof)
if err != nil {
t.Fatalf("failed to verify range proof: %v", err)
}
diff --git ethereum/go-ethereum/trie/secure_trie_test.go taikoxyz/taiko-geth/trie/secure_trie_test.go
index a610ca2fd667d2441c2ca4fc644d7273d0a3f376..2087866d3855f1403db6dddf4f28aa01172e1fa8 100644
--- ethereum/go-ethereum/trie/secure_trie_test.go
+++ taikoxyz/taiko-geth/trie/secure_trie_test.go
@@ -31,14 +31,14 @@ "github.com/ethereum/go-ethereum/trie/trienode"
)
func newEmptySecure() *StateTrie {
- trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase()))
+ trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase(), nil))
return trie
}
// makeTestStateTrie creates a large enough secure trie for testing.
func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) {
// Create an empty trie
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)
// Fill it with some arbitrary data
diff --git ethereum/go-ethereum/trie/stacktrie.go taikoxyz/taiko-geth/trie/stacktrie.go
index ee1ce282918d6dd24ff2306a35ae1e9c678bda3d..35208e1cb3453c4e2cf60b23d16c680a8884ab74 100644
--- ethereum/go-ethereum/trie/stacktrie.go
+++ taikoxyz/taiko-geth/trie/stacktrie.go
@@ -17,11 +17,7 @@
package trie
import (
- "bufio"
- "bytes"
- "encoding/gob"
"errors"
- "io"
"sync"
"github.com/ethereum/go-ethereum/common"
@@ -29,171 +25,87 @@ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
-var ErrCommitDisabled = errors.New("no database for committing")
-
-var stPool = sync.Pool{
- New: func() interface{} {
- return NewStackTrie(nil)
- },
-}
+var (
+ ErrCommitDisabled = errors.New("no database for committing")
+ stPool = sync.Pool{New: func() any { return new(stNode) }}
+ _ = types.TrieHasher((*StackTrie)(nil))
+)
// NodeWriteFunc is used to provide all information of a dirty node for committing
// so that callers can flush nodes into database with desired scheme.
-type NodeWriteFunc = func(owner common.Hash, path []byte, hash common.Hash, blob []byte)
-
-func stackTrieFromPool(writeFn NodeWriteFunc, owner common.Hash) *StackTrie {
- st := stPool.Get().(*StackTrie)
- st.owner = owner
- st.writeFn = writeFn
- return st
-}
-
-func returnToPool(st *StackTrie) {
- st.Reset()
- stPool.Put(st)
-}
+type NodeWriteFunc = func(path []byte, hash common.Hash, blob []byte)
// StackTrie is a trie implementation that expects keys to be inserted
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
- owner common.Hash // the owner of the trie
- nodeType uint8 // node type (as in branch, ext, leaf)
- val []byte // value contained by this node if it's a leaf
- key []byte // key chunk covered by this (leaf|ext) node
- children [16]*StackTrie // list of children (for branch and exts)
- writeFn NodeWriteFunc // function for committing nodes, can be nil
+ writeFn NodeWriteFunc // function for committing nodes, can be nil
+ root *stNode
+ h *hasher
}
// NewStackTrie allocates and initializes an empty trie.
func NewStackTrie(writeFn NodeWriteFunc) *StackTrie {
return &StackTrie{
- nodeType: emptyNode,
- writeFn: writeFn,
+ writeFn: writeFn,
+ root: stPool.Get().(*stNode),
+ h: newHasher(false),
}
}
-// NewStackTrieWithOwner allocates and initializes an empty trie, but with
-// the additional owner field.
-func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner common.Hash) *StackTrie {
- return &StackTrie{
- owner: owner,
- nodeType: emptyNode,
- writeFn: writeFn,
+// Update inserts a (key, value) pair into the stack trie.
+func (t *StackTrie) Update(key, value []byte) error {
+ k := keybytesToHex(key)
+ if len(value) == 0 {
+ panic("deletion not supported")
}
+ t.insert(t.root, k[:len(k)-1], value, nil)
+ return nil
}
-// NewFromBinary initialises a serialized stacktrie with the given db.
-func NewFromBinary(data []byte, writeFn NodeWriteFunc) (*StackTrie, error) {
- var st StackTrie
- if err := st.UnmarshalBinary(data); err != nil {
- return nil, err
- }
- // If a database is used, we need to recursively add it to every child
- if writeFn != nil {
- st.setWriter(writeFn)
+// MustUpdate is a wrapper of Update and will omit any encountered error but
+// just print out an error message.
+func (t *StackTrie) MustUpdate(key, value []byte) {
+ if err := t.Update(key, value); err != nil {
+ log.Error("Unhandled trie error in StackTrie.Update", "err", err)
}
- return &st, nil
}
-// MarshalBinary implements encoding.BinaryMarshaler
-func (st *StackTrie) MarshalBinary() (data []byte, err error) {
- var (
- b bytes.Buffer
- w = bufio.NewWriter(&b)
- )
- if err := gob.NewEncoder(w).Encode(struct {
- Owner common.Hash
- NodeType uint8
- Val []byte
- Key []byte
- }{
- st.owner,
- st.nodeType,
- st.val,
- st.key,
- }); err != nil {
- return nil, err
- }
- for _, child := range st.children {
- if child == nil {
- w.WriteByte(0)
- continue
- }
- w.WriteByte(1)
- if childData, err := child.MarshalBinary(); err != nil {
- return nil, err
- } else {
- w.Write(childData)
- }
- }
- w.Flush()
- return b.Bytes(), nil
+func (t *StackTrie) Reset() {
+ t.writeFn = nil
+ t.root = stPool.Get().(*stNode)
}
-// UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (st *StackTrie) UnmarshalBinary(data []byte) error {
- r := bytes.NewReader(data)
- return st.unmarshalBinary(r)
+// stNode represents a node within a StackTrie
+type stNode struct {
+ typ uint8 // node type (as in branch, ext, leaf)
+ key []byte // key chunk covered by this (leaf|ext) node
+ val []byte // value contained by this node if it's a leaf
+ children [16]*stNode // list of children (for branch and exts)
}
-func (st *StackTrie) unmarshalBinary(r io.Reader) error {
- var dec struct {
- Owner common.Hash
- NodeType uint8
- Val []byte
- Key []byte
- }
- if err := gob.NewDecoder(r).Decode(&dec); err != nil {
- return err
- }
- st.owner = dec.Owner
- st.nodeType = dec.NodeType
- st.val = dec.Val
- st.key = dec.Key
-
- var hasChild = make([]byte, 1)
- for i := range st.children {
- if _, err := r.Read(hasChild); err != nil {
- return err
- } else if hasChild[0] == 0 {
- continue
- }
- var child StackTrie
- if err := child.unmarshalBinary(r); err != nil {
- return err
- }
- st.children[i] = &child
- }
- return nil
-}
-
-func (st *StackTrie) setWriter(writeFn NodeWriteFunc) {
- st.writeFn = writeFn
- for _, child := range st.children {
- if child != nil {
- child.setWriter(writeFn)
- }
- }
-}
-
-func newLeaf(owner common.Hash, key, val []byte, writeFn NodeWriteFunc) *StackTrie {
- st := stackTrieFromPool(writeFn, owner)
- st.nodeType = leafNode
+// newLeaf constructs a leaf node with provided node key and value. The key
+// will be deep-copied in the function and safe to modify afterwards, but
+// value is not.
+func newLeaf(key, val []byte) *stNode {
+ st := stPool.Get().(*stNode)
+ st.typ = leafNode
st.key = append(st.key, key...)
st.val = val
return st
}
-func newExt(owner common.Hash, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie {
- st := stackTrieFromPool(writeFn, owner)
- st.nodeType = extNode
+// newExt constructs an extension node with provided node key and child. The
+// key will be deep-copied in the function and safe to modify afterwards.
+func newExt(key []byte, child *stNode) *stNode {
+ st := stPool.Get().(*stNode)
+ st.typ = extNode
st.key = append(st.key, key...)
st.children[0] = child
return st
}
-// List all values that StackTrie#nodeType can hold
+// List all values that stNode#nodeType can hold
const (
emptyNode = iota
branchNode
@@ -202,59 +114,40 @@ leafNode
hashedNode
)
-// Update inserts a (key, value) pair into the stack trie.
-func (st *StackTrie) Update(key, value []byte) error {
- k := keybytesToHex(key)
- if len(value) == 0 {
- panic("deletion not supported")
- }
- st.insert(k[:len(k)-1], value, nil)
- return nil
-}
-
-// MustUpdate is a wrapper of Update and will omit any encountered error but
-// just print out an error message.
-func (st *StackTrie) MustUpdate(key, value []byte) {
- if err := st.Update(key, value); err != nil {
- log.Error("Unhandled trie error in StackTrie.Update", "err", err)
- }
-}
-
-func (st *StackTrie) Reset() {
- st.owner = common.Hash{}
- st.writeFn = nil
- st.key = st.key[:0]
- st.val = nil
- for i := range st.children {
- st.children[i] = nil
+func (n *stNode) reset() *stNode {
+ n.key = n.key[:0]
+ n.val = nil
+ for i := range n.children {
+ n.children[i] = nil
}
- st.nodeType = emptyNode
+ n.typ = emptyNode
+ return n
}
// Helper function that, given a full key, determines the index
// at which the chunk pointed by st.keyOffset is different from
// the same chunk in the full key.
-func (st *StackTrie) getDiffIndex(key []byte) int {
- for idx, nibble := range st.key {
+func (n *stNode) getDiffIndex(key []byte) int {
+ for idx, nibble := range n.key {
if nibble != key[idx] {
return idx
}
}
- return len(st.key)
+ return len(n.key)
}
// Helper function to that inserts a (key, value) pair into
// the trie.
-func (st *StackTrie) insert(key, value []byte, prefix []byte) {
- switch st.nodeType {
+func (t *StackTrie) insert(st *stNode, key, value []byte, prefix []byte) {
+ switch st.typ {
case branchNode: /* Branch */
idx := int(key[0])
// Unresolve elder siblings
for i := idx - 1; i >= 0; i-- {
if st.children[i] != nil {
- if st.children[i].nodeType != hashedNode {
- st.children[i].hash(append(prefix, byte(i)))
+ if st.children[i].typ != hashedNode {
+ t.hash(st.children[i], append(prefix, byte(i)))
}
break
}
@@ -262,9 +155,9 @@ }
// Add new child
if st.children[idx] == nil {
- st.children[idx] = newLeaf(st.owner, key[1:], value, st.writeFn)
+ st.children[idx] = newLeaf(key[1:], value)
} else {
- st.children[idx].insert(key[1:], value, append(prefix, key[0]))
+ t.insert(st.children[idx], key[1:], value, append(prefix, key[0]))
}
case extNode: /* Ext */
@@ -279,46 +172,46 @@ // for each of the differentiated subtrees.
if diffidx == len(st.key) {
// Ext key and key segment are identical, recurse into
// the child node.
- st.children[0].insert(key[diffidx:], value, append(prefix, key[:diffidx]...))
+ t.insert(st.children[0], key[diffidx:], value, append(prefix, key[:diffidx]...))
return
}
// Save the original part. Depending if the break is
// at the extension's last byte or not, create an
// intermediate extension or use the extension's child
// node directly.
- var n *StackTrie
+ var n *stNode
if diffidx < len(st.key)-1 {
// Break on the non-last byte, insert an intermediate
// extension. The path prefix of the newly-inserted
// extension should also contain the different byte.
- n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.writeFn)
- n.hash(append(prefix, st.key[:diffidx+1]...))
+ n = newExt(st.key[diffidx+1:], st.children[0])
+ t.hash(n, append(prefix, st.key[:diffidx+1]...))
} else {
// Break on the last byte, no need to insert
// an extension node: reuse the current node.
// The path prefix of the original part should
// still be same.
n = st.children[0]
- n.hash(append(prefix, st.key...))
+ t.hash(n, append(prefix, st.key...))
}
- var p *StackTrie
+ var p *stNode
if diffidx == 0 {
// the break is on the first byte, so
// the current node is converted into
// a branch node.
st.children[0] = nil
p = st
- st.nodeType = branchNode
+ st.typ = branchNode
} else {
// the common prefix is at least one byte
// long, insert a new intermediate branch
// node.
- st.children[0] = stackTrieFromPool(st.writeFn, st.owner)
- st.children[0].nodeType = branchNode
+ st.children[0] = stPool.Get().(*stNode)
+ st.children[0].typ = branchNode
p = st.children[0]
}
// Create a leaf for the inserted part
- o := newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
+ o := newLeaf(key[diffidx+1:], value)
// Insert both child leaves where they belong:
origIdx := st.key[diffidx]
@@ -344,18 +237,18 @@
// Check if the split occurs at the first nibble of the
// chunk. In that case, no prefix extnode is necessary.
// Otherwise, create that
- var p *StackTrie
+ var p *stNode
if diffidx == 0 {
// Convert current leaf into a branch
- st.nodeType = branchNode
+ st.typ = branchNode
p = st
st.children[0] = nil
} else {
// Convert current node into an ext,
// and insert a child branch node.
- st.nodeType = extNode
- st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner)
- st.children[0].nodeType = branchNode
+ st.typ = extNode
+ st.children[0] = stPool.Get().(*stNode)
+ st.children[0].typ = branchNode
p = st.children[0]
}
@@ -363,11 +256,11 @@ // Create the two child leaves: one containing the original
// value and another containing the new value. The child leaf
// is hashed directly in order to free up some memory.
origIdx := st.key[diffidx]
- p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.writeFn)
- p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...))
+ p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val)
+ t.hash(p.children[origIdx], append(prefix, st.key[:diffidx+1]...))
newIdx := key[diffidx]
- p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
+ p.children[newIdx] = newLeaf(key[diffidx+1:], value)
// Finally, cut off the key part that has been passed
// over to the children.
@@ -375,7 +268,7 @@ st.key = st.key[:diffidx]
st.val = nil
case emptyNode: /* Empty */
- st.nodeType = leafNode
+ st.typ = leafNode
st.key = key
st.val = value
@@ -398,25 +291,18 @@ // - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
// - And the 'st.type' will be 'hashedNode' AGAIN
//
// This method also sets 'st.type' to hashedNode, and clears 'st.key'.
-func (st *StackTrie) hash(path []byte) {
- h := newHasher(false)
- defer returnHasherToPool(h)
-
- st.hashRec(h, path)
-}
-
-func (st *StackTrie) hashRec(hasher *hasher, path []byte) {
+func (t *StackTrie) hash(st *stNode, path []byte) {
// The switch below sets this to the RLP-encoding of this node.
var encodedNode []byte
- switch st.nodeType {
+ switch st.typ {
case hashedNode:
return
case emptyNode:
st.val = types.EmptyRootHash.Bytes()
st.key = st.key[:0]
- st.nodeType = hashedNode
+ st.typ = hashedNode
return
case branchNode:
@@ -426,50 +312,46 @@ if child == nil {
nodes.Children[i] = nilValueNode
continue
}
- child.hashRec(hasher, append(path, byte(i)))
+ t.hash(child, append(path, byte(i)))
+
if len(child.val) < 32 {
nodes.Children[i] = rawNode(child.val)
} else {
nodes.Children[i] = hashNode(child.val)
}
-
- // Release child back to pool.
st.children[i] = nil
- returnToPool(child)
+ stPool.Put(child.reset()) // Release child back to pool.
}
-
- nodes.encode(hasher.encbuf)
- encodedNode = hasher.encodedBytes()
+ nodes.encode(t.h.encbuf)
+ encodedNode = t.h.encodedBytes()
case extNode:
- st.children[0].hashRec(hasher, append(path, st.key...))
+ t.hash(st.children[0], append(path, st.key...))
- n := shortNode{Key: hexToCompact(st.key)}
+ n := shortNode{Key: hexToCompactInPlace(st.key)}
if len(st.children[0].val) < 32 {
n.Val = rawNode(st.children[0].val)
} else {
n.Val = hashNode(st.children[0].val)
}
-
- n.encode(hasher.encbuf)
- encodedNode = hasher.encodedBytes()
+ n.encode(t.h.encbuf)
+ encodedNode = t.h.encodedBytes()
- // Release child back to pool.
- returnToPool(st.children[0])
+ stPool.Put(st.children[0].reset()) // Release child back to pool.
st.children[0] = nil
case leafNode:
st.key = append(st.key, byte(16))
- n := shortNode{Key: hexToCompact(st.key), Val: valueNode(st.val)}
+ n := shortNode{Key: hexToCompactInPlace(st.key), Val: valueNode(st.val)}
- n.encode(hasher.encbuf)
- encodedNode = hasher.encodedBytes()
+ n.encode(t.h.encbuf)
+ encodedNode = t.h.encodedBytes()
default:
panic("invalid node type")
}
- st.nodeType = hashedNode
+ st.typ = hashedNode
st.key = st.key[:0]
if len(encodedNode) < 32 {
st.val = common.CopyBytes(encodedNode)
@@ -478,18 +360,16 @@ }
// Write the hash to the 'val'. We allocate a new val here to not mutate
// input values
- st.val = hasher.hashData(encodedNode)
- if st.writeFn != nil {
- st.writeFn(st.owner, path, common.BytesToHash(st.val), encodedNode)
+ st.val = t.h.hashData(encodedNode)
+ if t.writeFn != nil {
+ t.writeFn(path, common.BytesToHash(st.val), encodedNode)
}
}
// Hash returns the hash of the current node.
-func (st *StackTrie) Hash() (h common.Hash) {
- hasher := newHasher(false)
- defer returnHasherToPool(hasher)
-
- st.hashRec(hasher, nil)
+func (t *StackTrie) Hash() (h common.Hash) {
+ st := t.root
+ t.hash(st, nil)
if len(st.val) == 32 {
copy(h[:], st.val)
return h
@@ -497,9 +377,9 @@ }
// If the node's RLP isn't 32 bytes long, the node will not
// be hashed, and instead contain the rlp-encoding of the
// node. For the top level node, we need to force the hashing.
- hasher.sha.Reset()
- hasher.sha.Write(st.val)
- hasher.sha.Read(h[:])
+ t.h.sha.Reset()
+ t.h.sha.Write(st.val)
+ t.h.sha.Read(h[:])
return h
}
@@ -510,14 +390,12 @@ // here is to commit the root node.
//
// The associated database is expected, otherwise the whole commit
// functionality should be disabled.
-func (st *StackTrie) Commit() (h common.Hash, err error) {
- if st.writeFn == nil {
+func (t *StackTrie) Commit() (h common.Hash, err error) {
+ if t.writeFn == nil {
return common.Hash{}, ErrCommitDisabled
}
- hasher := newHasher(false)
- defer returnHasherToPool(hasher)
-
- st.hashRec(hasher, nil)
+ st := t.root
+ t.hash(st, nil)
if len(st.val) == 32 {
copy(h[:], st.val)
return h, nil
@@ -525,10 +403,10 @@ }
// If the node's RLP isn't 32 bytes long, the node will not
// be hashed (and committed), and instead contain the rlp-encoding of the
// node. For the top level node, we need to force the hashing+commit.
- hasher.sha.Reset()
- hasher.sha.Write(st.val)
- hasher.sha.Read(h[:])
+ t.h.sha.Reset()
+ t.h.sha.Write(st.val)
+ t.h.sha.Read(h[:])
- st.writeFn(st.owner, nil, h, st.val)
+ t.writeFn(nil, h, st.val)
return h, nil
}
diff --git ethereum/go-ethereum/trie/stacktrie_test.go taikoxyz/taiko-geth/trie/stacktrie_test.go
index ea3eef78820dd83c0ac39660e7214eda45d61799..0e52781c62ec789a0545f6c81a08fd16cc3100e0 100644
--- ethereum/go-ethereum/trie/stacktrie_test.go
+++ taikoxyz/taiko-geth/trie/stacktrie_test.go
@@ -165,13 +165,44 @@ {"123d", "x___________________________1", "8f09663deb02f08958136410dc48565e077f76bb6c9d8c84d35fc8913a657d31"},
{"123e", "x___________________________2", "0d230561e398c579e09a9f7b69ceaf7d3970f5a436fdb28b68b7a37c5bdd6b80"},
{"13aa", "x___________________________3", "ff0dc70ce2e5db90ee42a4c2ad12139596b890e90eb4e16526ab38fa465b35cf"},
},
+ { // branch node with short values
+ {"01", "a", "b48605025f5f4b129d40a420e721aa7d504487f015fce85b96e52126365ef7dc"},
+ {"80", "b", "2dc6b680daf74db067cb7aeaad73265ded93d96fce190fcbf64f498d475672ab"},
+ {"ee", "c", "017dc705a54ac5328dd263fa1bae68d655310fb3e3f7b7bc57e9a43ddf99c4bf"},
+ {"ff", "d", "bd5a3584d271d459bd4eb95247b2fc88656b3671b60c1125ffe7bc0b689470d0"},
+ },
+ { // ext node with short branch node, then becoming long
+ {"a0", "a", "a83e028cb1e4365935661a9fd36a5c65c30b9ab416eaa877424146ca2a69d088"},
+ {"a1", "b", "f586a4639b07b01798ca65e05c253b75d51135ebfbf6f8d6e87c0435089e65f0"},
+ {"a2", "c", "63e297c295c008e09a8d531e18d57f270b6bc403e23179b915429db948cd62e3"},
+ {"a3", "d", "94a7b721535578e9381f1f4e4b6ec29f8bdc5f0458a30320684c562f5d47b4b5"},
+ {"a4", "e", "4b7e66d1c81965cdbe8fab8295ef56bc57fefdc5733d4782d2f8baf630f083c6"},
+ {"a5", "f", "2997e7b502198ce1783b5277faacf52b25844fb55a99b63e88bdbbafac573106"},
+ {"a6", "g", "bee629dd27a40772b2e1a67ec6db270d26acdf8d3b674dfae27866ad6ae1f48b"},
+ },
+ { // branch node with short values, then long ones
+ {"a001", "v1", "b9cc982d995392b51e6787f1915f0b88efd4ad8b30f138da0a3e2242f2323e35"},
+ {"b002", "v2", "a7b474bc77ef5097096fa0ee6298fdae8928c0bc3724e7311cd0fa9ed1942fc7"},
+ {"c003", "v___________________________3", "dceb5bb7c92b0e348df988a8d9fc36b101397e38ebd405df55ba6ee5f14a264a"},
+ {"d004", "v___________________________4", "36e60ecb86b9626165e1c6543c42ecbe4d83bca58e8e1124746961511fce362a"},
+ },
+ { // ext node to branch node with short values, then long ones
+ {"8002", "v1", "3258fcb3e9e7d7234ecd3b8d4743999e4ab3a21592565e0a5ca64c141e8620d9"},
+ {"8004", "v2", "b6cb95b7024a83c17624a3c9bed09b4b5e8ed426f49f54b8ad13c39028b1e75a"},
+ {"8008", "v___________________________3", "c769d82963abe6f0900bf69754738eeb2f84559777cfa87a44f54e1aab417871"},
+ {"800d", "v___________________________4", "1cad1fdaab1a6fa95d7b780fd680030e423eb76669971368ba04797a8d9cdfc9"},
+ },
+ { // ext node with a child of size 31 (Y) and branch node with a child of size 31 (X)
+ {"000001", "ZZZZZZZZZ", "cef154b87c03c563408520ff9b26923c360cbc3ddb590c079bedeeb25a8c9c77"},
+ {"000002", "Y", "2130735e600f612f6e657a32bd7be64ddcaec6512c5694844b19de713922895d"},
+ {"000003", "XXXXXXXXXXXXXXXXXXXXXXXXXXXX", "962c0fffdeef7612a4f7bff1950d67e3e81c878e48b9ae45b3b374253b050bd8"},
+ },
}
- st := NewStackTrie(nil)
for i, test := range tests {
// The StackTrie does not allow Insert(), Hash(), Insert(), ...
// so we will create new trie for every sequence length of inserts.
for l := 1; l <= len(test); l++ {
- st.Reset()
+ st := NewStackTrie(nil)
for j := 0; j < l; j++ {
kv := &test[j]
if err := st.Update(common.FromHex(kv.K), []byte(kv.V)); err != nil {
@@ -188,7 +219,7 @@ }
func TestSizeBug(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -203,7 +234,7 @@ }
func TestEmptyBug(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -229,7 +260,7 @@ }
func TestValLength56(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -254,7 +285,7 @@ // TestUpdateSmallNodes tests a case where the leaves are small (both key and value),
// which causes a lot of node-within-node. This case was found via fuzzing.
func TestUpdateSmallNodes(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
kvs := []struct {
K string
V string
@@ -282,7 +313,7 @@ // This case was found via fuzzing.
func TestUpdateVariableKeys(t *testing.T) {
t.SkipNow()
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
kvs := []struct {
K string
V string
@@ -345,48 +376,3 @@ t.Fatalf("item %d, have %#x want %#x", i, have, want)
}
}
}
-
-// TestStacktrieSerialization tests that the stacktrie works well if we
-// serialize/unserialize it a lot
-func TestStacktrieSerialization(t *testing.T) {
- var (
- st = NewStackTrie(nil)
- nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- keyB = big.NewInt(1)
- keyDelta = big.NewInt(1)
- vals [][]byte
- keys [][]byte
- )
- getValue := func(i int) []byte {
- if i%2 == 0 { // large
- return crypto.Keccak256(big.NewInt(int64(i)).Bytes())
- } else { //small
- return big.NewInt(int64(i)).Bytes()
- }
- }
- for i := 0; i < 10; i++ {
- vals = append(vals, getValue(i))
- keys = append(keys, common.BigToHash(keyB).Bytes())
- keyB = keyB.Add(keyB, keyDelta)
- keyDelta.Add(keyDelta, common.Big1)
- }
- for i, k := range keys {
- nt.Update(k, common.CopyBytes(vals[i]))
- }
-
- for i, k := range keys {
- blob, err := st.MarshalBinary()
- if err != nil {
- t.Fatal(err)
- }
- newSt, err := NewFromBinary(blob, nil)
- if err != nil {
- t.Fatal(err)
- }
- st = newSt
- st.Update(k, common.CopyBytes(vals[i]))
- }
- if have, want := st.Hash(), nt.Hash(); have != want {
- t.Fatalf("have %#x want %#x", have, want)
- }
-}
diff --git ethereum/go-ethereum/trie/sync.go taikoxyz/taiko-geth/trie/sync.go
index 4f55845991796307b7b81562fa0d50f57b39c828..6939aed76d4b941695207db20272ee99607aebb0 100644
--- ethereum/go-ethereum/trie/sync.go
+++ taikoxyz/taiko-geth/trie/sync.go
@@ -27,6 +27,7 @@ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
)
// ErrNotRequested is returned by the trie sync when it's requested to process a
@@ -41,6 +42,16 @@ // maxFetchesPerDepth is the maximum number of pending trie nodes per depth. The
// role of this value is to limit the number of trie nodes that get expanded in
// memory if the node was configured with a significant number of peers.
const maxFetchesPerDepth = 16384
+
+var (
+ // deletionGauge is the metric to track how many trie node deletions
+ // are performed in total during the sync process.
+ deletionGauge = metrics.NewRegisteredGauge("trie/sync/delete", nil)
+
+ // lookupGauge is the metric to track how many trie node lookups are
+	// performed to determine if a trie node needs to be deleted.
+ lookupGauge = metrics.NewRegisteredGauge("trie/sync/lookup", nil)
+)
// SyncPath is a path tuple identifying a particular trie node either in a single
// trie (account) or a layered trie (account -> storage).
@@ -93,9 +104,10 @@ type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
// nodeRequest represents a scheduled or already in-flight trie node retrieval request.
type nodeRequest struct {
- hash common.Hash // Hash of the trie node to retrieve
- path []byte // Merkle path leading to this node for prioritization
- data []byte // Data content of the node, cached until all subtrees complete
+ hash common.Hash // Hash of the trie node to retrieve
+ path []byte // Merkle path leading to this node for prioritization
+ data []byte // Data content of the node, cached until all subtrees complete
+ deletes [][]byte // List of internal path segments for trie nodes to delete
parent *nodeRequest // Parent state node referencing this entry
deps int // Number of dependencies before allowed to commit this node
@@ -125,18 +137,20 @@
// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
- nodes map[string][]byte // In-memory membatch of recently completed nodes
- hashes map[string]common.Hash // Hashes of recently completed nodes
- codes map[common.Hash][]byte // In-memory membatch of recently completed codes
- size uint64 // Estimated batch-size of in-memory data.
+ nodes map[string][]byte // In-memory membatch of recently completed nodes
+ hashes map[string]common.Hash // Hashes of recently completed nodes
+ deletes map[string]struct{} // List of paths for trie node to delete
+ codes map[common.Hash][]byte // In-memory membatch of recently completed codes
+ size uint64 // Estimated batch-size of in-memory data.
}
// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
return &syncMemBatch{
- nodes: make(map[string][]byte),
- hashes: make(map[string]common.Hash),
- codes: make(map[common.Hash][]byte),
+ nodes: make(map[string][]byte),
+ hashes: make(map[string]common.Hash),
+ deletes: make(map[string]struct{}),
+ codes: make(map[common.Hash][]byte),
}
}
@@ -288,7 +302,7 @@ return nodePaths, nodeHashes, codeHashes
}
// ProcessCode injects the received data for requested item. Note it can
-// happpen that the single response commits two pending requests(e.g.
+// happen that the single response commits two pending requests(e.g.
// there are two requests one for code and one for node but the hash
// is same). In this case the second response for the same hash will
// be treated as "non-requested" item or "already-processed" item but
@@ -347,16 +361,23 @@
// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any occurred error.
func (s *Sync) Commit(dbw ethdb.Batch) error {
- // Dump the membatch into a database dbw
+ // Flush the pending node writes into database batch.
for path, value := range s.membatch.nodes {
owner, inner := ResolvePath([]byte(path))
rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme)
}
+ // Flush the pending node deletes into the database batch.
+ // Please note that each written and deleted node has a
+ // unique path, ensuring no duplication occurs.
+ for path := range s.membatch.deletes {
+ owner, inner := ResolvePath([]byte(path))
+ rawdb.DeleteTrieNode(dbw, owner, inner, common.Hash{} /* unused */, s.scheme)
+ }
+ // Flush the pending code writes into database batch.
for hash, value := range s.membatch.codes {
rawdb.WriteCode(dbw, hash, value)
}
- // Drop the membatch data and return
- s.membatch = newSyncMemBatch()
+ s.membatch = newSyncMemBatch() // reset the batch
return nil
}
@@ -370,7 +391,7 @@ func (s *Sync) Pending() int {
return len(s.nodeReqs) + len(s.codeReqs)
}
-// schedule inserts a new state retrieval request into the fetch queue. If there
+// scheduleNodeRequest inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) scheduleNodeRequest(req *nodeRequest) {
@@ -385,7 +406,7 @@ }
s.queue.Push(string(req.path), prio)
}
-// schedule inserts a new state retrieval request into the fetch queue. If there
+// scheduleCodeRequest inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) scheduleCodeRequest(req *codeRequest) {
@@ -425,6 +446,39 @@ children = []childNode{{
node: node.Val,
path: append(append([]byte(nil), req.path...), key...),
}}
+		// Mark all internal nodes between the shortNode and its on-disk
+ // child as invalid. This is essential in the case of path mode
+ // scheme; otherwise, state healing might overwrite existing child
+ // nodes silently while leaving a dangling parent node within the
+ // range of this internal path on disk. This would break the
+ // guarantee for state healing.
+ //
+ // While it's possible for this shortNode to overwrite a previously
+ // existing full node, the other branches of the fullNode can be
+ // retained as they remain untouched and complete.
+ //
+ // This step is only necessary for path mode, as there is no deletion
+ // in hash mode at all.
+ if _, ok := node.Val.(hashNode); ok && s.scheme == rawdb.PathScheme {
+ owner, inner := ResolvePath(req.path)
+ for i := 1; i < len(key); i++ {
+ // While checking for a non-existent item in Pebble can be less efficient
+ // without a bloom filter, the relatively low frequency of lookups makes
+ // the performance impact negligible.
+ var exists bool
+ if owner == (common.Hash{}) {
+ exists = rawdb.ExistsAccountTrieNode(s.database, append(inner, key[:i]...))
+ } else {
+ exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...))
+ }
+ if exists {
+ req.deletes = append(req.deletes, key[:i])
+ deletionGauge.Inc(1)
+ log.Debug("Detected dangling node", "owner", owner, "path", append(inner, key[:i]...))
+ }
+ }
+ lookupGauge.Inc(int64(len(key) - 1))
+ }
case *fullNode:
for i := 0; i < 17; i++ {
if node.Children[i] != nil {
@@ -502,17 +556,26 @@ }
return requests, nil
}
-// commit finalizes a retrieval request and stores it into the membatch. If any
+// commitNodeRequest finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commitNodeRequest(req *nodeRequest) error {
// Write the node content to the membatch
s.membatch.nodes[string(req.path)] = req.data
s.membatch.hashes[string(req.path)] = req.hash
+
// The size tracking refers to the db-batch, not the in-memory data.
- // Therefore, we ignore the req.path, and account only for the hash+data
- // which eventually is written to db.
- s.membatch.size += common.HashLength + uint64(len(req.data))
+ if s.scheme == rawdb.PathScheme {
+ s.membatch.size += uint64(len(req.path) + len(req.data))
+ } else {
+ s.membatch.size += common.HashLength + uint64(len(req.data))
+ }
+ // Delete the internal nodes which are marked as invalid
+ for _, segment := range req.deletes {
+ path := append(req.path, segment...)
+ s.membatch.deletes[string(path)] = struct{}{}
+ s.membatch.size += uint64(len(path))
+ }
delete(s.nodeReqs, string(req.path))
s.fetches[len(req.path)]--
@@ -528,7 +591,7 @@ }
return nil
}
-// commit finalizes a retrieval request and stores it into the membatch. If any
+// commitCodeRequest finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commitCodeRequest(req *codeRequest) error {
diff --git ethereum/go-ethereum/trie/sync_test.go taikoxyz/taiko-geth/trie/sync_test.go
index b6fe8d84a6df241ab5ba85f4b04802a006aa46eb..3b7986ef67922c185022b2461d9d73c380fe34ab 100644
--- ethereum/go-ethereum/trie/sync_test.go
+++ taikoxyz/taiko-geth/trie/sync_test.go
@@ -70,31 +70,53 @@ }
// checkTrieContents cross references a reconstructed trie with an expected data
// content map.
-func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte) {
+func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte, rawTrie bool) {
// Check root availability and trie contents
ndb := newTestDatabase(db, scheme)
- trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb)
- if err != nil {
- t.Fatalf("failed to create trie at %x: %v", root, err)
+ if err := checkTrieConsistency(db, scheme, common.BytesToHash(root), rawTrie); err != nil {
+ t.Fatalf("inconsistent trie at %x: %v", root, err)
}
- if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil {
- t.Fatalf("inconsistent trie at %x: %v", root, err)
+ type reader interface {
+ MustGet(key []byte) []byte
+ }
+ var r reader
+ if rawTrie {
+ trie, err := New(TrieID(common.BytesToHash(root)), ndb)
+ if err != nil {
+ t.Fatalf("failed to create trie at %x: %v", root, err)
+ }
+ r = trie
+ } else {
+ trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb)
+ if err != nil {
+ t.Fatalf("failed to create trie at %x: %v", root, err)
+ }
+ r = trie
}
for key, val := range content {
- if have := trie.MustGet([]byte(key)); !bytes.Equal(have, val) {
+ if have := r.MustGet([]byte(key)); !bytes.Equal(have, val) {
t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
}
}
}
// checkTrieConsistency checks that all nodes in a trie are indeed present.
-func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error {
+func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash, rawTrie bool) error {
ndb := newTestDatabase(db, scheme)
- trie, err := NewStateTrie(TrieID(root), ndb)
- if err != nil {
- return nil // Consider a non existent state consistent
+ var it NodeIterator
+ if rawTrie {
+ trie, err := New(TrieID(root), ndb)
+ if err != nil {
+ return nil // Consider a non existent state consistent
+ }
+ it = trie.MustNodeIterator(nil)
+ } else {
+ trie, err := NewStateTrie(TrieID(root), ndb)
+ if err != nil {
+ return nil // Consider a non existent state consistent
+ }
+ it = trie.MustNodeIterator(nil)
}
- it := trie.MustNodeIterator(nil)
for it.Next(true) {
}
return it.Error()
@@ -109,8 +131,8 @@ }
// Tests that an empty trie is not scheduled for syncing.
func TestEmptySync(t *testing.T) {
- dbA := NewDatabase(rawdb.NewMemoryDatabase())
- dbB := NewDatabase(rawdb.NewMemoryDatabase())
+ dbA := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
+ dbB := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
@@ -205,7 +227,7 @@ })
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -271,7 +293,7 @@ })
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that given a root hash, a trie can sync iteratively on a single thread,
@@ -341,7 +363,7 @@ }
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -413,7 +435,7 @@ }
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that a trie sync will not request nodes multiple times, even if they
@@ -484,7 +506,7 @@ })
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that at any point in time during a sync, only complete sub-tries are in
@@ -569,7 +591,7 @@ owner, inner := ResolvePath([]byte(path))
nodeHash := addedHashes[i]
value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme)
rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme)
- if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil {
+ if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root, false); err == nil {
t.Fatalf("trie inconsistency not caught, missing: %x", path)
}
rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme)
@@ -643,7 +665,7 @@ reqs = append(reqs, NewSyncPath([]byte(paths[i])))
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
// Check that the trie nodes have been requested path-ordered
for i := 0; i < len(reqs)-1; i++ {
@@ -664,7 +686,7 @@ sched := NewSync(root, db, nil, srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
- paths, nodes, _ := sched.Missing(1)
+ paths, nodes, _ := sched.Missing(0)
var elements []trieElement
for i := 0; i < len(paths); i++ {
elements = append(elements, trieElement{
@@ -698,7 +720,7 @@ t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
- paths, nodes, _ = sched.Missing(1)
+ paths, nodes, _ = sched.Missing(0)
elements = elements[:0]
for i := 0; i < len(paths); i++ {
elements = append(elements, trieElement{
@@ -724,7 +746,7 @@
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
syncWith(t, srcTrie.Hash(), diskdb, srcDb)
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
// Push more modifications into the src trie, to see if dest trie can still
// sync with it(overwrite stale states)
@@ -748,7 +770,7 @@ preRoot = root
srcTrie, _ = NewStateTrie(TrieID(root), srcDb)
syncWith(t, srcTrie.Hash(), diskdb, srcDb)
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff, false)
// Revert added modifications from the src trie, to see if dest trie can still
// sync with it(overwrite reverted states)
@@ -772,5 +794,98 @@ }
srcTrie, _ = NewStateTrie(TrieID(root), srcDb)
syncWith(t, srcTrie.Hash(), diskdb, srcDb)
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted, false)
+}
+
+// Tests if state syncer can correctly catch up the pivot move.
+func TestPivotMove(t *testing.T) {
+ testPivotMove(t, rawdb.HashScheme, true)
+ testPivotMove(t, rawdb.HashScheme, false)
+ testPivotMove(t, rawdb.PathScheme, true)
+ testPivotMove(t, rawdb.PathScheme, false)
+}
+
+func testPivotMove(t *testing.T, scheme string, tiny bool) {
+ var (
+ srcDisk = rawdb.NewMemoryDatabase()
+ srcTrieDB = newTestDatabase(srcDisk, scheme)
+ srcTrie, _ = New(TrieID(types.EmptyRootHash), srcTrieDB)
+
+ deleteFn = func(key []byte, tr *Trie, states map[string][]byte) {
+ tr.Delete(key)
+ delete(states, string(key))
+ }
+ writeFn = func(key []byte, val []byte, tr *Trie, states map[string][]byte) {
+ if val == nil {
+ if tiny {
+ val = randBytes(4)
+ } else {
+ val = randBytes(32)
+ }
+ }
+ tr.Update(key, val)
+ states[string(key)] = common.CopyBytes(val)
+ }
+ copyStates = func(states map[string][]byte) map[string][]byte {
+ cpy := make(map[string][]byte)
+ for k, v := range states {
+ cpy[k] = v
+ }
+ return cpy
+ }
+ )
+ stateA := make(map[string][]byte)
+ writeFn([]byte{0x01, 0x23}, nil, srcTrie, stateA)
+ writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateA)
+ writeFn([]byte{0x12, 0x33}, nil, srcTrie, stateA)
+ writeFn([]byte{0x12, 0x34}, nil, srcTrie, stateA)
+ writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateA)
+ writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA)
+
+ rootA, nodesA, _ := srcTrie.Commit(false)
+ if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil {
+ panic(err)
+ }
+ if err := srcTrieDB.Commit(rootA, false); err != nil {
+ panic(err)
+ }
+ // Create a destination trie and sync with the scheduler
+ destDisk := rawdb.NewMemoryDatabase()
+ syncWith(t, rootA, destDisk, srcTrieDB)
+ checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateA, true)
+
+ // Delete element to collapse trie
+ stateB := copyStates(stateA)
+ srcTrie, _ = New(TrieID(rootA), srcTrieDB)
+ deleteFn([]byte{0x02, 0x34}, srcTrie, stateB)
+ deleteFn([]byte{0x13, 0x44}, srcTrie, stateB)
+ writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateB)
+
+ rootB, nodesB, _ := srcTrie.Commit(false)
+ if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil {
+ panic(err)
+ }
+ if err := srcTrieDB.Commit(rootB, false); err != nil {
+ panic(err)
+ }
+ syncWith(t, rootB, destDisk, srcTrieDB)
+ checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateB, true)
+
+ // Add elements to expand trie
+ stateC := copyStates(stateB)
+ srcTrie, _ = New(TrieID(rootB), srcTrieDB)
+
+ writeFn([]byte{0x01, 0x24}, stateA[string([]byte{0x01, 0x24})], srcTrie, stateC)
+ writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateC)
+ writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateC)
+
+ rootC, nodesC, _ := srcTrie.Commit(false)
+ if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil {
+ panic(err)
+ }
+ if err := srcTrieDB.Commit(rootC, false); err != nil {
+ panic(err)
+ }
+ syncWith(t, rootC, destDisk, srcTrieDB)
+ checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateC, true)
}
diff --git ethereum/go-ethereum/trie/tracer_test.go taikoxyz/taiko-geth/trie/tracer_test.go
index 86daec6d2077c3eccbc2544b132a4234d5d42b0e..acb8c2f6bf4ff8e6d692458215b86bdbf7ed36de 100644
--- ethereum/go-ethereum/trie/tracer_test.go
+++ taikoxyz/taiko-geth/trie/tracer_test.go
@@ -61,7 +61,7 @@
// Tests if the trie diffs are tracked correctly. Tracer should capture
// all non-leaf dirty nodes, no matter the node is embedded or not.
func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
+ db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(db)
// Determine all new nodes are tracked
@@ -104,7 +104,7 @@ testTrieTracerNoop(t, standard)
}
func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for _, val := range vals {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
@@ -128,7 +128,7 @@ }
func testAccessList(t *testing.T, vals []struct{ k, v string }) {
var (
- db = NewDatabase(rawdb.NewMemoryDatabase())
+ db = NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie = NewEmpty(db)
orig = trie.Copy()
)
@@ -211,7 +211,7 @@
// Tests origin values won't be tracked in Iterator or Prover
func TestAccessListLeak(t *testing.T) {
var (
- db = NewDatabase(rawdb.NewMemoryDatabase())
+ db = NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie = NewEmpty(db)
)
// Create trie from scratch
@@ -262,7 +262,7 @@ // Tests whether the original tree node is correctly deleted after being embedded
// in its parent due to the smaller size of the original tree node.
func TestTinyTree(t *testing.T) {
var (
- db = NewDatabase(rawdb.NewMemoryDatabase())
+ db = NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie = NewEmpty(db)
)
for _, val := range tiny {
diff --git ethereum/go-ethereum/trie/trie_reader.go taikoxyz/taiko-geth/trie/trie_reader.go
index 1c63ff4544fd3f3f7358d4840447e42107e0c67a..42159645590fbe425e6f1f2a3c2daeee62a528e5 100644
--- ethereum/go-ethereum/trie/trie_reader.go
+++ taikoxyz/taiko-geth/trie/trie_reader.go
@@ -20,6 +20,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/triestate"
)
// Reader wraps the Node method of a backing trie store.
@@ -83,3 +84,18 @@ return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
}
return blob, nil
}
+
+// trieLoader implements triestate.TrieLoader for constructing tries.
+type trieLoader struct {
+ db *Database
+}
+
+// OpenTrie opens the main account trie.
+func (l *trieLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
+ return New(TrieID(root), l.db)
+}
+
+// OpenStorageTrie opens the storage trie of an account.
+func (l *trieLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
+ return New(StorageTrieID(stateRoot, addrHash, root), l.db)
+}
diff --git ethereum/go-ethereum/trie/trie_test.go taikoxyz/taiko-geth/trie/trie_test.go
index 3cb21c1956b312735bbb1a23fa06484a2275844b..2dfe81ef81d2ffd7860c169074ae6c808b1c8a1a 100644
--- ethereum/go-ethereum/trie/trie_test.go
+++ taikoxyz/taiko-geth/trie/trie_test.go
@@ -45,7 +45,7 @@ spew.Config.DisableMethods = false
}
func TestEmptyTrie(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
res := trie.Hash()
exp := types.EmptyRootHash
if res != exp {
@@ -54,7 +54,7 @@ }
}
func TestNull(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
key := make([]byte, 32)
value := []byte("test")
trie.MustUpdate(key, value)
@@ -64,8 +64,13 @@ }
}
func TestMissingRoot(t *testing.T) {
+ testMissingRoot(t, rawdb.HashScheme)
+ testMissingRoot(t, rawdb.PathScheme)
+}
+
+func testMissingRoot(t *testing.T, scheme string) {
root := common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
- trie, err := New(TrieID(root), NewDatabase(rawdb.NewMemoryDatabase()))
+ trie, err := New(TrieID(root), newTestDatabase(rawdb.NewMemoryDatabase(), scheme))
if trie != nil {
t.Error("New returned non-nil trie for invalid root")
}
@@ -161,7 +166,7 @@ }
}
func TestInsert(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
@@ -173,7 +178,7 @@ if root != exp {
t.Errorf("case 1: exp %x got %x", exp, root)
}
- trie = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
@@ -184,7 +189,7 @@ }
}
func TestGet(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
+ db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(db)
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
@@ -209,7 +214,7 @@ }
}
func TestDelete(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -236,7 +241,7 @@ }
}
func TestEmptyValues(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -260,7 +265,7 @@ }
}
func TestReplication(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
+ db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -321,7 +326,7 @@ }
}
func TestLargeValue(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99})
trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32))
trie.Hash()
@@ -604,12 +609,14 @@
const benchElemCount = 20000
func benchGet(b *testing.B) {
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(triedb)
k := make([]byte, 32)
for i := 0; i < benchElemCount; i++ {
binary.LittleEndian.PutUint64(k, uint64(i))
- trie.MustUpdate(k, k)
+ v := make([]byte, 32)
+ binary.LittleEndian.PutUint64(v, uint64(i))
+ trie.MustUpdate(k, v)
}
binary.LittleEndian.PutUint64(k, benchElemCount/2)
@@ -621,12 +628,14 @@ b.StopTimer()
}
func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
k := make([]byte, 32)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
+ v := make([]byte, 32)
e.PutUint64(k, uint64(i))
- trie.MustUpdate(k, k)
+ e.PutUint64(v, uint64(i))
+ trie.MustUpdate(k, v)
}
return trie
}
@@ -651,7 +660,7 @@ // Create a realistic account trie to hash. We're first adding and hashing N
// entries, then adding N more.
addresses, accounts := makeAccounts(2 * b.N)
// Insert the accounts into the trie and hash it
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
i := 0
for ; i < len(addresses)/2; i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
@@ -682,7 +691,7 @@
func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
// Make the random benchmark deterministic
addresses, accounts := makeAccounts(b.N)
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -696,7 +705,7 @@
func TestTinyTrie(t *testing.T) {
// Create a realistic account trie to hash
_, accounts := makeAccounts(5)
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root {
t.Errorf("1: got %x, exp %x", root, exp)
@@ -709,7 +718,7 @@ trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4])
if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
t.Errorf("3: got %x, exp %x", root, exp)
}
- checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
checktr.MustUpdate(it.Key, it.Value)
@@ -722,7 +731,7 @@
func TestCommitAfterHash(t *testing.T) {
// Create a realistic account trie to hash
addresses, accounts := makeAccounts(1000)
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -788,11 +797,17 @@ func (s *spongeDb) Stat(property string) (string, error) { panic("implement me") }
func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
func (s *spongeDb) Close() error { return nil }
func (s *spongeDb) Put(key []byte, value []byte) error {
- valbrief := value
+ var (
+ keybrief = key
+ valbrief = value
+ )
+ if len(keybrief) > 8 {
+ keybrief = keybrief[:8]
+ }
if len(valbrief) > 8 {
valbrief = valbrief[:8]
}
- s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, key[:8], len(value), valbrief))
+ s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, keybrief, len(value), valbrief))
s.sponge.Write(key)
s.sponge.Write(value)
return nil
@@ -830,7 +845,7 @@ } {
addresses, accounts := makeAccounts(tc.count)
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := NewDatabase(rawdb.NewDatabase(s))
+ db := NewDatabase(rawdb.NewDatabase(s), nil)
trie := NewEmpty(db)
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
@@ -861,7 +876,7 @@ } {
prng := rand.New(rand.NewSource(int64(i)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := NewDatabase(rawdb.NewDatabase(s))
+ db := NewDatabase(rawdb.NewDatabase(s), nil)
trie := NewEmpty(db)
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
@@ -893,12 +908,12 @@ for count := 1; count < 200; count++ {
prng := rand.New(rand.NewSource(int64(count)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
- db := NewDatabase(rawdb.NewDatabase(s))
+ db := NewDatabase(rawdb.NewDatabase(s), nil)
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
- stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
+ stTrie := NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
})
// Fill the trie with elements
for i := 0; i < count; i++ {
@@ -952,12 +967,12 @@ // that even a small trie which contains a leaf will have an extension making it
// not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do.
func TestCommitSequenceSmallRoot(t *testing.T) {
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
- db := NewDatabase(rawdb.NewDatabase(s))
+ db := NewDatabase(rawdb.NewDatabase(s), nil)
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
- stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
+ stTrie := NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
})
// Add a single small-element to the trie(s)
key := make([]byte, 5)
@@ -1029,7 +1044,7 @@ }
func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs()
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -1080,7 +1095,7 @@ }
func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs()
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -1132,7 +1147,7 @@ }
func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs()
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(triedb)
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
diff --git ethereum/go-ethereum/trie/triedb/hashdb/database.go taikoxyz/taiko-geth/trie/triedb/hashdb/database.go
index 4441f2a3827d2a6b7d2a681936e1ca90f6a2e789..764ab24ec8dd9f4c0ed1a6fcf806f7c463d14ae0 100644
--- ethereum/go-ethereum/trie/triedb/hashdb/database.go
+++ taikoxyz/taiko-geth/trie/triedb/hashdb/database.go
@@ -65,6 +65,20 @@ type ChildResolver interface {
ForEach(node []byte, onChild func(common.Hash))
}
+// Config contains the settings for database.
+type Config struct {
+ CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
+}
+
+// Defaults is the default setting for database if it's not specified.
+// Notably, clean cache is disabled explicitly.
+var Defaults = &Config{
+ // Explicitly set clean cache size to 0 to avoid creating fastcache,
+ // otherwise database must be closed when it's no longer needed to
+ // prevent memory leak.
+ CleanCacheSize: 0,
+}
+
// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
@@ -122,12 +136,13 @@ resolver.ForEach(n.node, onChild)
}
// New initializes the hash-based node database.
-func New(diskdb ethdb.Database, size int, resolver ChildResolver) *Database {
- // Initialize the clean cache if the specified cache allowance
- // is non-zero. Note, the size is in bytes.
+func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Database {
+ if config == nil {
+ config = Defaults
+ }
var cleans *fastcache.Cache
- if size > 0 {
- cleans = fastcache.New(size)
+ if config.CleanCacheSize > 0 {
+ cleans = fastcache.New(config.CleanCacheSize)
}
return &Database{
diskdb: diskdb,
@@ -609,7 +624,10 @@ }
// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
-func (db *Database) Size() common.StorageSize {
+//
+// The first return will always be 0, representing the memory stored in unbounded
+// diff layers above the dirty cache. This is only available in pathdb.
+func (db *Database) Size() (common.StorageSize, common.StorageSize) {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -617,11 +635,17 @@ // db.dirtiesSize only contains the useful data in the cache, but when reporting
// the total memory consumption, the maintenance metadata is also needed to be
// counted.
var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
- return db.dirtiesSize + db.childrenSize + metadataSize
+ return 0, db.dirtiesSize + db.childrenSize + metadataSize
}
// Close closes the trie database and releases all held resources.
-func (db *Database) Close() error { return nil }
+func (db *Database) Close() error {
+ if db.cleans != nil {
+ db.cleans.Reset()
+ db.cleans = nil
+ }
+ return nil
+}
// Scheme returns the node scheme used in the database.
func (db *Database) Scheme() string {
diff --git ethereum/go-ethereum/trie/triedb/pathdb/database.go taikoxyz/taiko-geth/trie/triedb/pathdb/database.go
index 29f6b5e103cd9f138eea954484da071f2cf4e5be..dc64414e9b520aeca5a08e6ca44b090680729dcf 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/database.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/database.go
@@ -33,8 +33,26 @@ "github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/triestate"
)
-// maxDiffLayers is the maximum diff layers allowed in the layer tree.
-const maxDiffLayers = 128
+const (
+ // maxDiffLayers is the maximum diff layers allowed in the layer tree.
+ maxDiffLayers = 128
+
+ // defaultCleanSize is the default memory allowance of clean cache.
+ defaultCleanSize = 16 * 1024 * 1024
+
+ // maxBufferSize is the maximum memory allowance of node buffer.
+ // An overly large node buffer will cause the system to pause for a
+ // long time when a write happens. Also, the largest batch that pebble can
+ // support is 4GB, node will panic if batch size exceeds this limit.
+ maxBufferSize = 256 * 1024 * 1024
+
+ // DefaultBufferSize is the default memory allowance of node buffer
+ // that aggregates the writes from above until it's flushed into the
+ // disk. It's meant to be used once the initial sync is finished.
+ // Do not increase the buffer size arbitrarily, otherwise the system
+ // pause time will increase when the database writes happen.
+ DefaultBufferSize = 64 * 1024 * 1024
+)
// layer is the interface implemented by all state layers which includes some
// public methods and some additional methods for internal usage.
@@ -68,29 +86,32 @@ }
// Config contains the settings for database.
type Config struct {
- StateLimit uint64 // Number of recent blocks to maintain state history for
- CleanSize int // Maximum memory allowance (in bytes) for caching clean nodes
- DirtySize int // Maximum memory allowance (in bytes) for caching dirty nodes
- ReadOnly bool // Flag whether the database is opened in read only mode.
+ StateHistory uint64 // Number of recent blocks to maintain state history for
+ CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
+ DirtyCacheSize int // Maximum memory allowance (in bytes) for caching dirty nodes
+ ReadOnly bool // Flag whether the database is opened in read only mode.
}
-var (
- // defaultCleanSize is the default memory allowance of clean cache.
- defaultCleanSize = 16 * 1024 * 1024
-
- // defaultBufferSize is the default memory allowance of node buffer
- // that aggregates the writes from above until it's flushed into the
- // disk. Do not increase the buffer size arbitrarily, otherwise the
- // system pause time will increase when the database writes happen.
- defaultBufferSize = 128 * 1024 * 1024
-)
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (c *Config) sanitize() *Config {
+ conf := *c
+ if conf.DirtyCacheSize > maxBufferSize {
+ log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(conf.DirtyCacheSize), "updated", common.StorageSize(maxBufferSize))
+ conf.DirtyCacheSize = maxBufferSize
+ }
+ return &conf
+}
// Defaults contains default settings for Ethereum mainnet.
var Defaults = &Config{
- StateLimit: params.FullImmutabilityThreshold,
- CleanSize: defaultCleanSize,
- DirtySize: defaultBufferSize,
+ StateHistory: params.FullImmutabilityThreshold,
+ CleanCacheSize: defaultCleanSize,
+ DirtyCacheSize: DefaultBufferSize,
}
+
+// ReadOnly is the config in order to open database in read only mode.
+var ReadOnly = &Config{ReadOnly: true}
// Database is a multiple-layered structure for maintaining in-memory trie nodes.
// It consists of one persistent base layer backed by a key-value store, on top
@@ -107,7 +128,8 @@ type Database struct {
// readOnly is the flag whether the mutation is allowed to be applied.
// It will be set automatically when the database is journaled during
// the shutdown to reject all following unexpected mutations.
- readOnly bool // Indicator if database is opened in read only mode
+ readOnly bool // Flag if database is opened in read only mode
+ waitSync bool // Flag if database is deactivated due to initial state sync
bufferSize int // Memory allowance (in bytes) for caching dirty nodes
config *Config // Configuration for database
diskdb ethdb.Database // Persistent storage for matured trie nodes
@@ -123,9 +145,11 @@ func New(diskdb ethdb.Database, config *Config) *Database {
if config == nil {
config = Defaults
}
+ config = config.sanitize()
+
db := &Database{
readOnly: config.ReadOnly,
- bufferSize: config.DirtySize,
+ bufferSize: config.DirtyCacheSize,
config: config,
diskdb: diskdb,
}
@@ -140,7 +164,7 @@ // Because the freezer can only be opened once at the same time, this
// mechanism also ensures that at most one **non-readOnly** database
// is opened at the same time to prevent accidental mutation.
if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly {
- freezer, err := rawdb.NewStateHistoryFreezer(ancient, false)
+ freezer, err := rawdb.NewStateFreezer(ancient, false)
if err != nil {
log.Crit("Failed to open state history freezer", "err", err)
}
@@ -156,6 +180,12 @@ if pruned != 0 {
log.Warn("Truncated extra state histories", "number", pruned)
}
}
+ // Disable database in case node is still in the initial state sync stage.
+ if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly {
+ if err := db.Disable(); err != nil {
+ log.Crit("Failed to disable database", "err", err) // impossible to happen
+ }
+ }
log.Warn("Path-based state scheme is an experimental feature")
return db
}
@@ -181,9 +211,9 @@ // Hold the lock to prevent concurrent mutations.
db.lock.Lock()
defer db.lock.Unlock()
- // Short circuit if the database is in read only mode.
- if db.readOnly {
- return errSnapshotReadOnly
+ // Short circuit if the mutation is not allowed.
+ if err := db.modifyAllowed(); err != nil {
+ return err
}
if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil {
return err
@@ -204,45 +234,59 @@ // Hold the lock to prevent concurrent mutations.
db.lock.Lock()
defer db.lock.Unlock()
+ // Short circuit if the mutation is not allowed.
+ if err := db.modifyAllowed(); err != nil {
+ return err
+ }
+ return db.tree.cap(root, 0)
+}
+
+// Disable deactivates the database and invalidates all available state layers
+// as stale to prevent access to the persistent state, which is in the syncing
+// stage.
+func (db *Database) Disable() error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
// Short circuit if the database is in read only mode.
if db.readOnly {
- return errSnapshotReadOnly
+ return errDatabaseReadOnly
}
- return db.tree.cap(root, 0)
+ // Prevent duplicated disable operation.
+ if db.waitSync {
+ log.Error("Reject duplicated disable operation")
+ return nil
+ }
+ db.waitSync = true
+
+ // Mark the disk layer as stale to prevent access to persistent state.
+ db.tree.bottom().markStale()
+
+ // Write the initial sync flag to persist it across restarts.
+ rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncRunning)
+ log.Info("Disabled trie database due to state sync")
+ return nil
}
-// Reset rebuilds the database with the specified state as the base.
-//
-// - if target state is empty, clear the stored state and all layers on top
-// - if target state is non-empty, ensure the stored state matches with it
-// and clear all other layers on top.
-func (db *Database) Reset(root common.Hash) error {
+// Enable activates database and resets the state tree with the provided persistent
+// state root once the state sync is finished.
+func (db *Database) Enable(root common.Hash) error {
db.lock.Lock()
defer db.lock.Unlock()
// Short circuit if the database is in read only mode.
if db.readOnly {
- return errSnapshotReadOnly
+ return errDatabaseReadOnly
}
- batch := db.diskdb.NewBatch()
+ // Ensure the provided state root matches the stored one.
root = types.TrieRootHash(root)
- if root == types.EmptyRootHash {
- // Empty state is requested as the target, nuke out
- // the root node and leave all others as dangling.
- rawdb.DeleteAccountTrieNode(batch, nil)
- } else {
- // Ensure the requested state is existent before any
- // action is applied.
- _, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil)
- if hash != root {
- return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root)
- }
+ _, stored := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ if stored != root {
+ return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root)
}
- // Mark the disk layer as stale before applying any mutation.
- db.tree.bottom().markStale()
-
// Drop the stale state journal in persistent database and
// reset the persistent state id back to zero.
+ batch := db.diskdb.NewBatch()
rawdb.DeleteTrieJournal(batch)
rawdb.WritePersistentStateID(batch, 0)
if err := batch.Write(); err != nil {
@@ -259,8 +303,11 @@ }
}
// Re-construct a new disk layer backed by persistent state
// with **empty clean cache and node buffer**.
- dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0))
- db.tree.reset(dl)
+ db.tree.reset(newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0)))
+
+ // Re-enable the database as the final step.
+ db.waitSync = false
+ rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncFinished)
log.Info("Rebuilt trie database", "root", root)
return nil
}
@@ -273,7 +320,10 @@ db.lock.Lock()
defer db.lock.Unlock()
// Short circuit if rollback operation is not supported.
- if db.readOnly || db.freezer == nil {
+ if err := db.modifyAllowed(); err != nil {
+ return err
+ }
+ if db.freezer == nil {
return errors.New("state rollback is non-supported")
}
// Short circuit if the target state is not recoverable.
@@ -344,7 +394,14 @@ func (db *Database) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
+ // Set the database to read-only mode to prevent all
+ // following mutations.
db.readOnly = true
+
+ // Release the memory held by clean cache.
+ db.tree.bottom().resetCache()
+
+ // Close the attached state history freezer.
if db.freezer == nil {
return nil
}
@@ -353,16 +410,16 @@ }
// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
-func (db *Database) Size() (size common.StorageSize) {
+func (db *Database) Size() (diffs common.StorageSize, nodes common.StorageSize) {
db.tree.forEach(func(layer layer) {
if diff, ok := layer.(*diffLayer); ok {
- size += common.StorageSize(diff.memory)
+ diffs += common.StorageSize(diff.memory)
}
if disk, ok := layer.(*diskLayer); ok {
- size += disk.size()
+ nodes += disk.size()
}
})
- return size
+ return diffs, nodes
}
// Initialized returns an indicator if the state data is already
@@ -382,6 +439,10 @@ func (db *Database) SetBufferSize(size int) error {
db.lock.Lock()
defer db.lock.Unlock()
+ if size > maxBufferSize {
+ log.Info("Capped node buffer size", "provided", common.StorageSize(size), "adjusted", common.StorageSize(maxBufferSize))
+ size = maxBufferSize
+ }
db.bufferSize = size
return db.tree.bottom().setBufferSize(db.bufferSize)
}
@@ -390,3 +451,15 @@ // Scheme returns the node scheme used in the database.
func (db *Database) Scheme() string {
return rawdb.PathScheme
}
+
+// modifyAllowed returns the indicator if mutation is allowed. This function
+// assumes the db.lock is already held.
+func (db *Database) modifyAllowed() error {
+ if db.readOnly {
+ return errDatabaseReadOnly
+ }
+ if db.waitSync {
+ return errDatabaseWaitSync
+ }
+ return nil
+}
diff --git ethereum/go-ethereum/trie/triedb/pathdb/database_test.go taikoxyz/taiko-geth/trie/triedb/pathdb/database_test.go
index bcc37e59c853050b769f8bc47133f2c41e3ed217..912364f7f44a90cfd1491b925bac5443aa51b7a0 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/database_test.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/database_test.go
@@ -46,7 +46,8 @@ } else {
h.Update(key.Bytes(), val)
}
}
- return h.Commit(false)
+ root, nodes, _ := h.Commit(false)
+ return root, nodes
}
func generateAccount(storageRoot common.Hash) types.StateAccount {
@@ -98,7 +99,7 @@
func newTester(t *testing.T) *tester {
var (
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
- db = New(disk, &Config{CleanSize: 256 * 1024, DirtySize: 256 * 1024})
+ db = New(disk, &Config{CleanCacheSize: 256 * 1024, DirtyCacheSize: 256 * 1024})
obj = &tester{
db: db,
preimages: make(map[common.Hash]common.Address),
@@ -438,38 +439,39 @@ }
}
}
-func TestReset(t *testing.T) {
- var (
- tester = newTester(t)
- index = tester.bottomIndex()
- )
+func TestDisable(t *testing.T) {
+ tester := newTester(t)
defer tester.release()
- // Reset database to unknown target, should reject it
- if err := tester.db.Reset(testutil.RandomHash()); err == nil {
- t.Fatal("Failed to reject invalid reset")
+ _, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)
+ if err := tester.db.Disable(); err != nil {
+ t.Fatal("Failed to deactivate database")
+ }
+ if err := tester.db.Enable(types.EmptyRootHash); err == nil {
+ t.Fatalf("Invalid activation should be rejected")
}
- // Reset database to state persisted in the disk
- if err := tester.db.Reset(types.EmptyRootHash); err != nil {
- t.Fatalf("Failed to reset database %v", err)
+ if err := tester.db.Enable(stored); err != nil {
+ t.Fatal("Failed to activate database")
}
+
// Ensure journal is deleted from disk
if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
t.Fatal("Failed to clean journal")
}
// Ensure all trie histories are removed
- for i := 0; i <= index; i++ {
- _, err := readHistory(tester.db.freezer, uint64(i+1))
- if err == nil {
- t.Fatalf("Failed to clean state history, index %d", i+1)
- }
+ n, err := tester.db.freezer.Ancients()
+ if err != nil {
+ t.Fatal("Failed to clean state history")
+ }
+ if n != 0 {
+ t.Fatal("Failed to clean state history")
}
// Verify layer tree structure, single disk layer is expected
if tester.db.tree.len() != 1 {
t.Fatalf("Extra layer kept %d", tester.db.tree.len())
}
- if tester.db.tree.bottom().rootHash() != types.EmptyRootHash {
- t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash())
+ if tester.db.tree.bottom().rootHash() != stored {
+ t.Fatalf("Root hash is not matched exp %x got %x", stored, tester.db.tree.bottom().rootHash())
}
}
diff --git ethereum/go-ethereum/trie/triedb/pathdb/difflayer.go taikoxyz/taiko-geth/trie/triedb/pathdb/difflayer.go
index d25ac1c601d79ef86145abefe20a2fd02b7e4a11..10567715d2e71e98052ed8d755e1aedf0356d234 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/difflayer.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/difflayer.go
@@ -114,7 +114,7 @@ // bubble up an error here. It shouldn't happen at all.
if n.Hash != hash {
dirtyFalseMeter.Mark(1)
log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
- return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path)
+ return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path, n.Blob)
}
dirtyHitMeter.Mark(1)
dirtyNodeHitDepthHist.Update(int64(depth))
diff --git ethereum/go-ethereum/trie/triedb/pathdb/difflayer_test.go taikoxyz/taiko-geth/trie/triedb/pathdb/difflayer_test.go
index 77c4cd5722dad094e15606848a75abaed3bcca10..9b5907c3c5b30db61aa697b7c6dcfd97c17b36e6 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/difflayer_test.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/difflayer_test.go
@@ -29,7 +29,7 @@
func emptyLayer() *diskLayer {
return &diskLayer{
db: New(rawdb.NewMemoryDatabase(), nil),
- buffer: newNodeBuffer(defaultBufferSize, nil, 0),
+ buffer: newNodeBuffer(DefaultBufferSize, nil, 0),
}
}
diff --git ethereum/go-ethereum/trie/triedb/pathdb/disklayer.go taikoxyz/taiko-geth/trie/triedb/pathdb/disklayer.go
index b526b3b7dd9db98be4db1119bfa8afcb061dc512..d3b6419cc59484c6a24368b6b24a545ff1bf1973 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/disklayer.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/disklayer.go
@@ -47,8 +47,8 @@ func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer {
// Initialize a clean cache if the memory allowance is not zero
// or reuse the provided cache if it is not nil (inherited from
// the original disk layer).
- if cleans == nil && db.config.CleanSize != 0 {
- cleans = fastcache.New(db.config.CleanSize)
+ if cleans == nil && db.config.CleanCacheSize != 0 {
+ cleans = fastcache.New(db.config.CleanCacheSize)
}
return &diskLayer{
root: root,
@@ -150,7 +150,7 @@ }
if nHash != hash {
diskFalseMeter.Mark(1)
log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash)
- return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path)
+ return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path, nBlob)
}
if dl.cleans != nil && len(nBlob) > 0 {
dl.cleans.Set(key, nBlob)
@@ -177,7 +177,7 @@ // after storing the state history but without flushing the
// corresponding states(journal), the stored state history will
// be truncated in the next restart.
if dl.db.freezer != nil {
- err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateLimit)
+ err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateHistory)
if err != nil {
return nil, err
}
@@ -274,6 +274,20 @@ if dl.stale {
return 0
}
return common.StorageSize(dl.buffer.size)
+}
+
+// resetCache releases the memory held by clean cache to prevent memory leak.
+func (dl *diskLayer) resetCache() {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // Stale disk layer loses the ownership of clean cache.
+ if dl.stale {
+ return
+ }
+ if dl.cleans != nil {
+ dl.cleans.Reset()
+ }
}
// hasher is used to compute the sha256 hash of the provided data.
diff --git ethereum/go-ethereum/trie/triedb/pathdb/errors.go taikoxyz/taiko-geth/trie/triedb/pathdb/errors.go
index f503a9c49d2e0a0284149b27d2e933ec12084968..78ee4459fe50943d103ca2400173c6786317dfd0 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/errors.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/errors.go
@@ -21,12 +21,17 @@ "errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
)
var (
- // errSnapshotReadOnly is returned if the database is opened in read only mode
- // and mutation is requested.
- errSnapshotReadOnly = errors.New("read only")
+ // errDatabaseReadOnly is returned if the database is opened in read only mode
+ // to prevent any mutation.
+ errDatabaseReadOnly = errors.New("read only")
+
+ // errDatabaseWaitSync is returned if the initial state sync is not completed
+ // yet and database is disabled to prevent accessing state.
+ errDatabaseWaitSync = errors.New("waiting for sync")
// errSnapshotStale is returned from data accessors if the underlying layer
// layer had been invalidated due to the chain progressing forward far enough
@@ -46,6 +51,10 @@ // not hash matched with expectation.
errUnexpectedNode = errors.New("unexpected node")
)
-func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error {
- return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash)
+func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte, blob []byte) error {
+ blobHex := "nil"
+ if len(blob) > 0 {
+ blobHex = hexutil.Encode(blob)
+ }
+ return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x, blob: %s", errUnexpectedNode, loc, owner, path, expHash, gotHash, blobHex)
}
diff --git ethereum/go-ethereum/trie/triedb/pathdb/history_test.go taikoxyz/taiko-geth/trie/triedb/pathdb/history_test.go
index 6c250c2591ccb772a20daeb614bac0077b0a3e44..677103e2b0ad479e11ca05c916d9d8046a4deb1d 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/history_test.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/history_test.go
@@ -226,7 +226,7 @@ }
// openFreezer initializes the freezer instance for storing state histories.
func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) {
- return rawdb.NewStateHistoryFreezer(datadir, readOnly)
+ return rawdb.NewStateFreezer(datadir, readOnly)
}
func compareSet[k comparable](a, b map[k][]byte) bool {
diff --git ethereum/go-ethereum/trie/triedb/pathdb/journal.go taikoxyz/taiko-geth/trie/triedb/pathdb/journal.go
index d8c7d39fb9bdf794e52613c9b950c8be9d6d9a2e..ac770763e38df7315728ad1734720b8070776ec9 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/journal.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/journal.go
@@ -21,6 +21,7 @@ "bytes"
"errors"
"fmt"
"io"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
@@ -341,13 +342,21 @@ l := db.tree.get(root)
if l == nil {
return fmt.Errorf("triedb layer [%#x] missing", root)
}
+ disk := db.tree.bottom()
+ if l, ok := l.(*diffLayer); ok {
+ log.Info("Persisting dirty state to disk", "head", l.block, "root", root, "layers", l.id-disk.id+disk.buffer.layers)
+ } else { // disk layer only on noop runs (likely) or deep reorgs (unlikely)
+ log.Info("Persisting dirty state to disk", "root", root, "layers", disk.buffer.layers)
+ }
+ start := time.Now()
+
// Run the journaling
db.lock.Lock()
defer db.lock.Unlock()
// Short circuit if the database is in read only mode.
if db.readOnly {
- return errSnapshotReadOnly
+ return errDatabaseReadOnly
}
// Firstly write out the metadata of journal
journal := new(bytes.Buffer)
@@ -373,6 +382,6 @@ rawdb.WriteTrieJournal(db.diskdb, journal.Bytes())
// Set the db in read only mode to reject all following mutations
db.readOnly = true
- log.Info("Stored journal in triedb", "disk", diskroot, "size", common.StorageSize(journal.Len()))
+ log.Info("Persisted dirty state to disk", "size", common.StorageSize(journal.Len()), "elapsed", common.PrettyDuration(time.Since(start)))
return nil
}
diff --git ethereum/go-ethereum/trie/triedb/pathdb/nodebuffer.go taikoxyz/taiko-geth/trie/triedb/pathdb/nodebuffer.go
index 67de225b0495993b833a0ed49d1f7cb2bc4f0c58..4a7d328b9afb8fbbad679028debbaf3a07146581 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/nodebuffer.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/nodebuffer.go
@@ -71,7 +71,7 @@ }
if n.Hash != hash {
dirtyFalseMeter.Mark(1)
log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
- return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path)
+ return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path, n.Blob)
}
return n, nil
}
diff --git ethereum/go-ethereum/trie/triedb/pathdb/testutils.go taikoxyz/taiko-geth/trie/triedb/pathdb/testutils.go
index 4406dbc52124ab93c8c6fa5dc1525f2e05f4a1df..d6fdacb4213e376492b7564e627e964c2f97214f 100644
--- ethereum/go-ethereum/trie/triedb/pathdb/testutils.go
+++ taikoxyz/taiko-geth/trie/triedb/pathdb/testutils.go
@@ -80,7 +80,7 @@ }
// Commit computes the new hash of the states and returns the set with all
// state changes.
-func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
var (
nodes = make(map[common.Hash][]byte)
set = trienode.NewNodeSet(h.owner)
@@ -108,7 +108,7 @@ }
if root == types.EmptyRootHash && h.root != types.EmptyRootHash {
set.AddNode(nil, trienode.NewDeleted())
}
- return root, set
+ return root, set, nil
}
// hash performs the hash computation upon the provided states.
diff --git ethereum/go-ethereum/light/nodeset.go taikoxyz/taiko-geth/trie/trienode/proof.go
rename from light/nodeset.go
rename to trie/trienode/proof.go
index 3662596785c79f85250d949eb653b4872f9827d0..012f0087dded262bea272f84e71c4c5c0af7337b 100644
--- ethereum/go-ethereum/light/nodeset.go
+++ taikoxyz/taiko-geth/trie/trienode/proof.go
@@ -14,7 +14,7 @@ //
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package light
+package trienode
import (
"errors"
@@ -26,9 +26,9 @@ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
)
-// NodeSet stores a set of trie nodes. It implements trie.Database and can also
+// ProofSet stores a set of trie nodes. It implements trie.Database and can also
// act as a cache for another trie.Database.
-type NodeSet struct {
+type ProofSet struct {
nodes map[string][]byte
order []string
@@ -36,15 +36,15 @@ dataSize int
lock sync.RWMutex
}
-// NewNodeSet creates an empty node set
-func NewNodeSet() *NodeSet {
- return &NodeSet{
+// NewProofSet creates an empty node set
+func NewProofSet() *ProofSet {
+ return &ProofSet{
nodes: make(map[string][]byte),
}
}
// Put stores a new node in the set
-func (db *NodeSet) Put(key []byte, value []byte) error {
+func (db *ProofSet) Put(key []byte, value []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
@@ -61,7 +61,7 @@ return nil
}
// Delete removes a node from the set
-func (db *NodeSet) Delete(key []byte) error {
+func (db *ProofSet) Delete(key []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
@@ -70,7 +70,7 @@ return nil
}
// Get returns a stored node
-func (db *NodeSet) Get(key []byte) ([]byte, error) {
+func (db *ProofSet) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -81,13 +81,13 @@ return nil, errors.New("not found")
}
// Has returns true if the node set contains the given key
-func (db *NodeSet) Has(key []byte) (bool, error) {
+func (db *ProofSet) Has(key []byte) (bool, error) {
_, err := db.Get(key)
return err == nil, nil
}
// KeyCount returns the number of nodes in the set
-func (db *NodeSet) KeyCount() int {
+func (db *ProofSet) KeyCount() int {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -95,19 +95,19 @@ return len(db.nodes)
}
// DataSize returns the aggregated data size of nodes in the set
-func (db *NodeSet) DataSize() int {
+func (db *ProofSet) DataSize() int {
db.lock.RLock()
defer db.lock.RUnlock()
return db.dataSize
}
-// NodeList converts the node set to a NodeList
-func (db *NodeSet) NodeList() NodeList {
+// List converts the node set to a ProofList
+func (db *ProofSet) List() ProofList {
db.lock.RLock()
defer db.lock.RUnlock()
- var values NodeList
+ var values ProofList
for _, key := range db.order {
values = append(values, db.nodes[key])
}
@@ -115,7 +115,7 @@ return values
}
// Store writes the contents of the set to the given database
-func (db *NodeSet) Store(target ethdb.KeyValueWriter) {
+func (db *ProofSet) Store(target ethdb.KeyValueWriter) {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -124,36 +124,36 @@ target.Put([]byte(key), value)
}
}
-// NodeList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter.
-type NodeList []rlp.RawValue
+// ProofList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter.
+type ProofList []rlp.RawValue
// Store writes the contents of the list to the given database
-func (n NodeList) Store(db ethdb.KeyValueWriter) {
+func (n ProofList) Store(db ethdb.KeyValueWriter) {
for _, node := range n {
db.Put(crypto.Keccak256(node), node)
}
}
-// NodeSet converts the node list to a NodeSet
-func (n NodeList) NodeSet() *NodeSet {
- db := NewNodeSet()
+// Set converts the node list to a ProofSet
+func (n ProofList) Set() *ProofSet {
+ db := NewProofSet()
n.Store(db)
return db
}
// Put stores a new node at the end of the list
-func (n *NodeList) Put(key []byte, value []byte) error {
+func (n *ProofList) Put(key []byte, value []byte) error {
*n = append(*n, value)
return nil
}
// Delete panics as there's no reason to remove a node from the list.
-func (n *NodeList) Delete(key []byte) error {
+func (n *ProofList) Delete(key []byte) error {
panic("not supported")
}
// DataSize returns the aggregated data size of nodes in the list
-func (n NodeList) DataSize() int {
+func (n ProofList) DataSize() int {
var size int
for _, node := range n {
size += len(node)
diff --git ethereum/go-ethereum/trie/triestate/state.go taikoxyz/taiko-geth/trie/triestate/state.go
index cb3611baf9cd08db2bcda099e728d33489a65464..4c47e9c3973495ed881c75315a2f011b3bcfef6c 100644
--- ethereum/go-ethereum/trie/triestate/state.go
+++ taikoxyz/taiko-geth/trie/triestate/state.go
@@ -43,7 +43,7 @@ Delete(key []byte) error
// Commit the trie and returns a set of dirty nodes generated along with
// the new root hash.
- Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet)
+ Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
}
// TrieLoader wraps functions to load tries.
@@ -129,7 +129,10 @@ if err != nil {
return nil, fmt.Errorf("failed to revert state, err: %w", err)
}
}
- root, result := tr.Commit(false)
+ root, result, err := tr.Commit(false)
+ if err != nil {
+ return nil, err
+ }
if root != prevRoot {
return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root)
}
@@ -181,7 +184,10 @@ if err != nil {
return err
}
}
- root, result := st.Commit(false)
+ root, result, err := st.Commit(false)
+ if err != nil {
+ return err
+ }
if root != prev.Root {
return errors.New("failed to reset storage trie")
}
@@ -232,7 +238,10 @@ if err := st.Delete(key.Bytes()); err != nil {
return err
}
}
- root, result := st.Commit(false)
+ root, result, err := st.Commit(false)
+ if err != nil {
+ return err
+ }
if root != types.EmptyRootHash {
return errors.New("failed to clear storage trie")
}